Diffstat (limited to 'include')
608 files changed, 15773 insertions, 4233 deletions
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h index 199afc2cd122..387fc821703a 100644 --- a/include/acpi/actbl1.h +++ b/include/acpi/actbl1.h @@ -403,6 +403,8 @@ struct acpi_cdat_dsmas { /* Flags for subtable above */ #define ACPI_CDAT_DSMAS_NON_VOLATILE (1 << 2) +#define ACPI_CDAT_DSMAS_SHAREABLE (1 << 3) +#define ACPI_CDAT_DSMAS_READ_ONLY (1 << 6) /* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */ diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index d3858eebc255..2e917a8f8bca 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -453,7 +453,7 @@ struct acpi_table_ccel { * IORT - IO Remapping Table * * Conforms to "IO Remapping Table System Software on ARM Platforms", - * Document number: ARM DEN 0049E.e, Sep 2022 + * Document number: ARM DEN 0049E.f, Apr 2024 * ******************************************************************************/ @@ -524,6 +524,7 @@ struct acpi_iort_memory_access { #define ACPI_IORT_MF_COHERENCY (1) #define ACPI_IORT_MF_ATTRIBUTES (1<<1) +#define ACPI_IORT_MF_CANWBS (1<<2) /* * IORT node specific subtables diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h index 9b373d172a77..699c1a37b8e7 100644 --- a/include/acpi/pcc.h +++ b/include/acpi/pcc.h @@ -12,6 +12,7 @@ struct pcc_mbox_chan { struct mbox_chan *mchan; u64 shmem_base_addr; + void __iomem *shmem; u64 shmem_size; u32 latency; u32 max_access_rate; @@ -31,11 +32,13 @@ struct pcc_mbox_chan { #define PCC_CMD_COMPLETION_NOTIFY BIT(0) #define MAX_PCC_SUBSPACES 256 +#define PCC_ACK_FLAG_MASK 0x1 #ifdef CONFIG_PCC extern struct pcc_mbox_chan * pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id); extern void pcc_mbox_free_channel(struct pcc_mbox_chan *chan); +extern int pcc_mbox_ioremap(struct mbox_chan *chan); #else static inline struct pcc_mbox_chan * pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id) @@ -43,6 +46,10 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id) return ERR_PTR(-ENODEV); } static inline void pcc_mbox_free_channel(struct pcc_mbox_chan *chan) { } +static inline int pcc_mbox_ioremap(struct mbox_chan *chan) +{ + return 0; +}; #endif #endif /* _PCC_H */ diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 565341c826e3..f3249b7df5cb 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -15,7 +15,7 @@ /* ACPICA external files should not include ACPICA headers directly. */ #if !defined(BUILDING_ACPICA) && !defined(_LINUX_ACPI_H) -#error "Please don't include <acpi/acpi.h> directly, include <linux/acpi.h> instead." +#error "Please do not include <acpi/acpi.h> directly, include <linux/acpi.h> instead." 
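The pcc_mbox_chan above gains a cached __iomem mapping (shmem) and a new pcc_mbox_ioremap() helper. As a rough sketch, not taken from this patch set, of how a PCC client might use them (the subspace ID, the my_* names and the surrounding client setup are invented for illustration):

#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <acpi/pcc.h>

#define MY_PCC_SUBSPACE 3	/* assumed subspace ID, platform specific */

static void __iomem *my_pcc_shmem;

static int my_pcc_attach(struct mbox_client *cl)
{
	struct pcc_mbox_chan *pchan;
	int ret;

	pchan = pcc_mbox_request_channel(cl, MY_PCC_SUBSPACE);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	/* New helper: maps shmem_base_addr and stores it in pchan->shmem */
	ret = pcc_mbox_ioremap(pchan->mchan);
	if (ret) {
		pcc_mbox_free_channel(pchan);
		return ret;
	}

	my_pcc_shmem = pchan->shmem;
	return 0;
}

Callers previously had to ioremap shmem_base_addr/shmem_size themselves; centralizing the mapping in the PCC core is the point of the new field.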
#endif #endif diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h index 331670807cf0..cc840537885f 100644 --- a/include/asm-generic/audit_change_attr.h +++ b/include/asm-generic/audit_change_attr.h @@ -11,9 +11,15 @@ __NR_lchown, __NR_fchown, #endif __NR_setxattr, +#ifdef __NR_setxattrat +__NR_setxattrat, +#endif __NR_lsetxattr, __NR_fsetxattr, __NR_removexattr, +#ifdef __NR_removexattrat +__NR_removexattrat, +#endif __NR_lremovexattr, __NR_fremovexattr, #ifdef __NR_fchownat diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h index 64f536b80380..372c320c5043 100644 --- a/include/asm-generic/codetag.lds.h +++ b/include/asm-generic/codetag.lds.h @@ -11,4 +11,23 @@ #define CODETAG_SECTIONS() \ SECTION_WITH_BOUNDARIES(alloc_tags) +/* + * Module codetags which aren't used after module unload have the same + * lifespan as the module and can be safely unloaded with it. + */ +#define MOD_CODETAG_SECTIONS() + +#define MOD_SEPARATE_CODETAG_SECTION(_name) \ + .codetag.##_name : { \ + SECTION_WITH_BOUNDARIES(_name) \ + } + +/* + * For codetags which might be used after module unload and therefore might + * stay longer in memory. Each such codetag type has its own section so that + * we can unload them individually once unused. + */ +#define MOD_SEPARATE_CODETAG_SECTIONS() \ + MOD_SEPARATE_CODETAG_SECTION(alloc_tags) + #endif /* __ASM_GENERIC_CODETAG_LDS_H */ diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h index e448ac61430c..76cf237b6e4c 100644 --- a/include/asm-generic/delay.h +++ b/include/asm-generic/delay.h @@ -2,6 +2,9 @@ #ifndef __ASM_GENERIC_DELAY_H #define __ASM_GENERIC_DELAY_H +#include <linux/math.h> +#include <vdso/time64.h> + /* Undefined functions to get compile-time errors */ extern void __bad_udelay(void); extern void __bad_ndelay(void); @@ -12,34 +15,73 @@ extern void __const_udelay(unsigned long xloops); extern void __delay(unsigned long loops); /* - * The weird n/20000 thing suppresses a "comparison is always false due to - * limited range of data type" warning with non-const 8-bit arguments. + * The microsecond/nanosecond delay multipliers are used to convert a + * constant microsecond/nanosecond value to a value which can be used by the + * architecture-specific implementation to transform it into loops. + */ +#define UDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, USEC_PER_SEC)) +#define NDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, NSEC_PER_SEC)) + +/* + * The maximum constant udelay/ndelay value, picked out of thin air to prevent + * overly long constant udelays/ndelays. */ +#define DELAY_CONST_MAX 20000 -/* 0x10c7 is 2**32 / 1000000 (rounded up) */ -#define udelay(n) \ - ({ \ - if (__builtin_constant_p(n)) { \ - if ((n) / 20000 >= 1) \ - __bad_udelay(); \ - else \ - __const_udelay((n) * 0x10c7ul); \ - } else { \ - __udelay(n); \ - } \ - }) - -/* 0x5 is 2**32 / 1000000000 (rounded up) */ -#define ndelay(n) \ - ({ \ - if (__builtin_constant_p(n)) { \ - if ((n) / 20000 >= 1) \ - __bad_ndelay(); \ - else \ - __const_udelay((n) * 5ul); \ - } else { \ - __ndelay(n); \ - } \ - }) +/** + * udelay - Insert a delay based on microseconds with busy waiting + * @usec: requested delay in microseconds + * + * When delaying in an atomic context, ndelay(), udelay() and mdelay() are the + * only valid variants of delaying/sleeping to use.
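The new constants replace the old hard-coded magic numbers 0x10c7 and 5. A standalone userspace sketch to sanity-check them (the kernel macros are copied here only for the demonstration):

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC	1000000ULL
#define NSEC_PER_SEC	1000000000ULL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

#define UDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, USEC_PER_SEC))
#define NDELAY_CONST_MULT ((unsigned long)DIV_ROUND_UP(1ULL << 32, NSEC_PER_SEC))

int main(void)
{
	/* The rounded-up reciprocals match the old hard-coded constants. */
	printf("UDELAY_CONST_MULT = %#lx\n", UDELAY_CONST_MULT);	/* 0x10c7 */
	printf("NDELAY_CONST_MULT = %#lx\n", NDELAY_CONST_MULT);	/* 0x5 */

	/*
	 * __const_udelay() consumes "xloops": the delay expressed as a
	 * 32.32 fixed-point fraction of one second.
	 */
	unsigned long usec = 100;
	printf("100us as 2^32nds of a second: %lu (exact: %.0f)\n",
	       usec * UDELAY_CONST_MULT, usec * 4294967296.0 / 1e6);
	return 0;
}

The slight excess from rounding up errs on the side of delaying a little too long, which is the safe direction for a busy-wait delay.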
+ * + * When inserting delays in a non-atomic context which are shorter than the + * time required to queue e.g. an hrtimer and then enter the scheduler, it is + * also valuable to use udelay(). There is no simple generic threshold which + * fits all systems; as an approximation, udelay() is reasonable for all + * delays up to 10 microseconds. + * + * For delays larger than the architecture-specific %MAX_UDELAY_MS value, + * please make sure mdelay() is used; otherwise there is a risk of overflow. + * + * Please note that ndelay(), udelay() and mdelay() may return early for several + * reasons (https://lists.openwall.net/linux-kernel/2011/01/09/56): + * + * #. computed loops_per_jiffy too low (due to the time taken to execute the + * timer interrupt.) + * #. cache behaviour affecting the time it takes to execute the loop function. + * #. CPU clock rate changes. + */ +static __always_inline void udelay(unsigned long usec) +{ + if (__builtin_constant_p(usec)) { + if (usec >= DELAY_CONST_MAX) + __bad_udelay(); + else + __const_udelay(usec * UDELAY_CONST_MULT); + } else { + __udelay(usec); + } +} + +/** + * ndelay - Insert a delay based on nanoseconds with busy waiting + * @nsec: requested delay in nanoseconds + * + * See udelay() for basic information about ndelay() and its variants. + */ +static __always_inline void ndelay(unsigned long nsec) +{ + if (__builtin_constant_p(nsec)) { + if (nsec >= DELAY_CONST_MAX) + __bad_ndelay(); + else + __const_udelay(nsec * NDELAY_CONST_MULT); + } else { + __ndelay(nsec); + } +} +#define ndelay(x) ndelay(x) #endif /* __ASM_GENERIC_DELAY_H */ diff --git a/include/asm-generic/div64.h b/include/asm-generic/div64.h index 13f5aa68a455..25e7b4b58dcf 100644 --- a/include/asm-generic/div64.h +++ b/include/asm-generic/div64.h @@ -74,7 +74,8 @@ * do the trick here). \ */ \ uint64_t ___res, ___x, ___t, ___m, ___n = (n); \ - uint32_t ___p, ___bias; \ + uint32_t ___p; \ + bool ___bias = false; \ \ /* determine MSB of b */ \ ___p = 1 << ilog2(___b); \ @@ -87,22 +88,14 @@ ___x = ~0ULL / ___b * ___b - 1; \ \ /* test our ___m with res = m * x / (p << 64) */ \ - ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \ - ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \ - ___res += (___x & 0xffffffff) * (___m >> 32); \ - ___t = (___res < ___t) ? (1ULL << 32) : 0; \ - ___res = (___res >> 32) + ___t; \ - ___res += (___m >> 32) * (___x >> 32); \ - ___res /= ___p; \ + ___res = (___m & 0xffffffff) * (___x & 0xffffffff); \ + ___t = (___m & 0xffffffff) * (___x >> 32) + (___res >> 32); \ + ___res = (___m >> 32) * (___x >> 32) + (___t >> 32); \ + ___t = (___m >> 32) * (___x & 0xffffffff) + (___t & 0xffffffff);\ + ___res = (___res + (___t >> 32)) / ___p; \ \ - /* Now sanitize and optimize what we've got. */ \ - if (~0ULL % (___b / (___b & -___b)) == 0) { \ - /* special case, can be simplified to ... */ \ - ___n /= (___b & -___b); \ - ___m = ~0ULL / (___b / (___b & -___b)); \ - ___p = 1; \ - ___bias = 1; \ - } else if (___res != ___x / ___b) { \ + /* Now validate what we've got. */ \ + if (___res != ___x / ___b) { \ /* \ * We can't get away without a bias to compensate \ * for bit truncation errors. To avoid it we'd need an \ @@ -111,45 +104,18 @@ * \ * Instead we do m = p / b and n / b = (n * m + m) / p.
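To make the reciprocal scheme concrete, here is a standalone userspace sketch of the whole flow: build the ceiling reciprocal, validate it against the worst case (the largest multiple of b, minus 1), and fall back to the floor reciprocal with a bias when that fails. The divisor is picked arbitrarily, ilog2() is replaced by a hand-computed constant, and the macro's final m/p reduction is omitted for brevity:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Mirror of the overflow-safe cross product: ((bias ? m : 0) + m * n) >> 64 */
static uint64_t xprod_64(uint64_t m, uint64_t n, bool bias)
{
	uint32_t m_lo = m, m_hi = m >> 32, n_lo = n, n_hi = n >> 32;
	uint64_t x, y;

	x = (uint64_t)m_lo * n_lo + (bias ? m_lo : 0);
	y = (uint64_t)m_lo * n_hi + (uint32_t)(x >> 32) + (bias ? m_hi : 0);
	x = (uint64_t)m_hi * n_hi + (uint32_t)(y >> 32);
	y = (uint64_t)m_hi * n_lo + (uint32_t)y;
	return x + (uint32_t)(y >> 32);
}

static uint64_t div_by_const(uint64_t n, uint32_t b, int log2_b_msb)
{
	uint32_t p = 1u << log2_b_msb;		/* 2^ilog2(b) */
	bool bias = false;

	/* m = ceil((p << 64) / b), the candidate tried first */
	uint64_t m = (~0ULL / b) * p;
	m += (((~0ULL % b + 1) * (uint64_t)p) + b - 1) / b;

	/* worst case: largest multiple of b, minus 1 */
	uint64_t x = ~0ULL / b * b - 1;

	if (xprod_64(m, x, false) / p != x / b) {
		/* fall back to m = floor((p << 64) / b) plus a bias */
		bias = true;
		m = (~0ULL / b) * p;
		m += ((~0ULL % b + 1) * (uint64_t)p) / b;
	}
	return xprod_64(m, n, bias) / p;
}

int main(void)
{
	const uint32_t b = 10000019;	/* arbitrary divisor; 2^23 <= b < 2^24 */
	uint64_t n = 0xfedcba9876543210ULL;

	printf("%llu / %u = %llu (direct: %llu)\n",
	       (unsigned long long)n, b,
	       (unsigned long long)div_by_const(n, b, 23),
	       (unsigned long long)(n / b));
	return 0;
}

In the kernel everything up to the final xprod_64() call constant-folds away, which is why the macro can afford this much machinery.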
\ */ \ - ___bias = 1; \ + ___bias = true; \ /* Compute m = (p << 64) / b */ \ ___m = (~0ULL / ___b) * ___p; \ ___m += ((~0ULL % ___b + 1) * ___p) / ___b; \ - } else { \ - /* \ - * Reduce m / p, and try to clear bit 31 of m when \ - * possible, otherwise that'll need extra overflow \ - * handling later. \ - */ \ - uint32_t ___bits = -(___m & -___m); \ - ___bits |= ___m >> 32; \ - ___bits = (~___bits) << 1; \ - /* \ - * If ___bits == 0 then setting bit 31 is unavoidable. \ - * Simply apply the maximum possible reduction in that \ - * case. Otherwise the MSB of ___bits indicates the \ - * best reduction we should apply. \ - */ \ - if (!___bits) { \ - ___p /= (___m & -___m); \ - ___m /= (___m & -___m); \ - } else { \ - ___p >>= ilog2(___bits); \ - ___m >>= ilog2(___bits); \ - } \ - /* No bias needed. */ \ - ___bias = 0; \ } \ \ + /* Reduce m / p to help avoid overflow handling later. */ \ + ___p /= (___m & -___m); \ + ___m /= (___m & -___m); \ + \ /* \ - * Now we have a combination of 2 conditions: \ - * \ - * 1) whether or not we need to apply a bias, and \ - * \ - * 2) whether or not there might be an overflow in the cross \ - * product determined by (___m & ((1 << 63) | (1 << 31))). \ - * \ - * Select the best way to do (m_bias + m * n) / (1 << 64). \ + * Perform (m_bias + m * n) / (1 << 64). \ * From now on there will be actual runtime code generated. \ */ \ ___res = __arch_xprod_64(___m, ___n, ___bias); \ @@ -165,47 +131,42 @@ * Semantic: retval = ((bias ? m : 0) + m * n) >> 64 * * The product is a 128-bit value, scaled down to 64 bits. - * Assuming constant propagation to optimize away unused conditional code. + * Hoping for compile-time optimization of conditional code. * Architectures may provide their own optimized assembly implementation. */ -static inline uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) +#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE +static __always_inline +#else +static inline +#endif +uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias) { uint32_t m_lo = m; uint32_t m_hi = m >> 32; uint32_t n_lo = n; uint32_t n_hi = n >> 32; - uint64_t res; - uint32_t res_lo, res_hi, tmp; - - if (!bias) { - res = ((uint64_t)m_lo * n_lo) >> 32; - } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) { - /* there can't be any overflow here */ - res = (m + (uint64_t)m_lo * n_lo) >> 32; + uint64_t x, y; + + /* Determine if overflow handling can be dispensed with. */ + bool no_ovf = __builtin_constant_p(m) && + ((m >> 32) + (m & 0xffffffff) < 0x100000000); + + if (no_ovf) { + x = (uint64_t)m_lo * n_lo + (bias ? m : 0); + x >>= 32; + x += (uint64_t)m_lo * n_hi; + x += (uint64_t)m_hi * n_lo; + x >>= 32; + x += (uint64_t)m_hi * n_hi; } else { - res = m + (uint64_t)m_lo * n_lo; - res_lo = res >> 32; - res_hi = (res_lo < m_hi); - res = res_lo | ((uint64_t)res_hi << 32); + x = (uint64_t)m_lo * n_lo + (bias ? m_lo : 0); + y = (uint64_t)m_lo * n_hi + (uint32_t)(x >> 32) + (bias ? 
m_hi : 0); + x = (uint64_t)m_hi * n_hi + (uint32_t)(y >> 32); + y = (uint64_t)m_hi * n_lo + (uint32_t)y; + x += (uint32_t)(y >> 32); } - if (!(m & ((1ULL << 63) | (1ULL << 31)))) { - /* there can't be any overflow here */ - res += (uint64_t)m_lo * n_hi; - res += (uint64_t)m_hi * n_lo; - res >>= 32; - } else { - res += (uint64_t)m_lo * n_hi; - tmp = res >> 32; - res += (uint64_t)m_hi * n_lo; - res_lo = res >> 32; - res_hi = (res_lo < tmp); - res = res_lo | ((uint64_t)res_hi << 32); - } - - res += (uint64_t)m_hi * n_hi; - - return res; + return x; } #endif diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 594d5905f615..f42133dae68e 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -42,20 +42,26 @@ static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) return pte_modify(pte, newprot); } +#ifndef __HAVE_ARCH_HUGE_PTE_MKUFFD_WP static inline pte_t huge_pte_mkuffd_wp(pte_t pte) { return huge_pte_wrprotect(pte_mkuffd_wp(pte)); } +#endif +#ifndef __HAVE_ARCH_HUGE_PTE_CLEAR_UFFD_WP static inline pte_t huge_pte_clear_uffd_wp(pte_t pte) { return pte_clear_uffd_wp(pte); } +#endif +#ifndef __HAVE_ARCH_HUGE_PTE_UFFD_WP static inline int huge_pte_uffd_wp(pte_t pte) { return pte_uffd_wp(pte); } +#endif #ifndef __HAVE_ARCH_HUGE_PTE_CLEAR static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, @@ -106,22 +112,17 @@ static inline int huge_pte_none(pte_t pte) #endif /* Please refer to comments above pte_none_mostly() for the usage */ +#ifndef __HAVE_ARCH_HUGE_PTE_NONE_MOSTLY static inline int huge_pte_none_mostly(pte_t pte) { return huge_pte_none(pte) || is_pte_marker(pte); } +#endif #ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE static inline int prepare_hugepage_range(struct file *file, unsigned long addr, unsigned long len) { - struct hstate *h = hstate_file(file); - - if (len & ~huge_page_mask(h)) - return -EINVAL; - if (addr & ~huge_page_mask(h)) - return -EINVAL; - return 0; } #endif diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h index 80de699bf6af..a5cbbf3e26ec 100644 --- a/include/asm-generic/io.h +++ b/include/asm-generic/io.h @@ -540,6 +540,7 @@ static inline void writesq(volatile void __iomem *addr, const void *buffer, #if !defined(inb) && !defined(_inb) #define _inb _inb +#ifdef CONFIG_HAS_IOPORT static inline u8 _inb(unsigned long addr) { u8 val; @@ -549,10 +550,15 @@ static inline u8 _inb(unsigned long addr) __io_par(val); return val; } +#else +u8 _inb(unsigned long addr) + __compiletime_error("inb() requires CONFIG_HAS_IOPORT"); +#endif #endif #if !defined(inw) && !defined(_inw) #define _inw _inw +#ifdef CONFIG_HAS_IOPORT static inline u16 _inw(unsigned long addr) { u16 val; @@ -562,10 +568,15 @@ static inline u16 _inw(unsigned long addr) __io_par(val); return val; } +#else +u16 _inw(unsigned long addr) + __compiletime_error("inw() requires CONFIG_HAS_IOPORT"); +#endif #endif #if !defined(inl) && !defined(_inl) #define _inl _inl +#ifdef CONFIG_HAS_IOPORT static inline u32 _inl(unsigned long addr) { u32 val; @@ -575,36 +586,55 @@ static inline u32 _inl(unsigned long addr) __io_par(val); return val; } +#else +u32 _inl(unsigned long addr) + __compiletime_error("inl() requires CONFIG_HAS_IOPORT"); +#endif #endif #if !defined(outb) && !defined(_outb) #define _outb _outb +#ifdef CONFIG_HAS_IOPORT static inline void _outb(u8 value, unsigned long addr) { __io_pbw(); __raw_writeb(value, PCI_IOBASE + addr); __io_paw(); } +#else +void _outb(u8 value, unsigned long addr) + __compiletime_error("outb()
requires CONFIG_HAS_IOPORT"); +#endif #endif #if !defined(outw) && !defined(_outw) #define _outw _outw +#ifdef CONFIG_HAS_IOPORT static inline void _outw(u16 value, unsigned long addr) { __io_pbw(); __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr); __io_paw(); } +#else +void _outw(u16 value, unsigned long addr) + __compiletime_error("outw() requires CONFIG_HAS_IOPORT"); +#endif #endif #if !defined(outl) && !defined(_outl) #define _outl _outl +#ifdef CONFIG_HAS_IOPORT static inline void _outl(u32 value, unsigned long addr) { __io_pbw(); __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr); __io_paw(); } +#else +void _outl(u32 value, unsigned long addr) + __compiletime_error("outl() requires CONFIG_HAS_IOPORT"); +#endif #endif #include <linux/logic_pio.h> @@ -688,53 +718,83 @@ static inline void outl_p(u32 value, unsigned long addr) #ifndef insb #define insb insb +#ifdef CONFIG_HAS_IOPORT static inline void insb(unsigned long addr, void *buffer, unsigned int count) { readsb(PCI_IOBASE + addr, buffer, count); } +#else +void insb(unsigned long addr, void *buffer, unsigned int count) + __compiletime_error("insb() requires HAS_IOPORT"); +#endif #endif #ifndef insw #define insw insw +#ifdef CONFIG_HAS_IOPORT static inline void insw(unsigned long addr, void *buffer, unsigned int count) { readsw(PCI_IOBASE + addr, buffer, count); } +#else +void insw(unsigned long addr, void *buffer, unsigned int count) + __compiletime_error("insw() requires HAS_IOPORT"); +#endif #endif #ifndef insl #define insl insl +#ifdef CONFIG_HAS_IOPORT static inline void insl(unsigned long addr, void *buffer, unsigned int count) { readsl(PCI_IOBASE + addr, buffer, count); } +#else +void insl(unsigned long addr, void *buffer, unsigned int count) + __compiletime_error("insl() requires HAS_IOPORT"); +#endif #endif #ifndef outsb #define outsb outsb +#ifdef CONFIG_HAS_IOPORT static inline void outsb(unsigned long addr, const void *buffer, unsigned int count) { writesb(PCI_IOBASE + addr, buffer, count); } +#else +void outsb(unsigned long addr, const void *buffer, unsigned int count) + __compiletime_error("outsb() requires HAS_IOPORT"); +#endif #endif #ifndef outsw #define outsw outsw +#ifdef CONFIG_HAS_IOPORT static inline void outsw(unsigned long addr, const void *buffer, unsigned int count) { writesw(PCI_IOBASE + addr, buffer, count); } +#else +void outsw(unsigned long addr, const void *buffer, unsigned int count) + __compiletime_error("outsw() requires HAS_IOPORT"); +#endif #endif #ifndef outsl #define outsl outsl +#ifdef CONFIG_HAS_IOPORT static inline void outsl(unsigned long addr, const void *buffer, unsigned int count) { writesl(PCI_IOBASE + addr, buffer, count); } +#else +void outsl(unsigned long addr, const void *buffer, unsigned int count) + __compiletime_error("outsl() requires HAS_IOPORT"); +#endif #endif #ifndef insb_p @@ -1151,7 +1211,6 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) #endif #ifndef memset_io -#define memset_io memset_io /** * memset_io Set a range of I/O memory to a constant value * @addr: The beginning of the I/O-memory range to set @@ -1160,15 +1219,10 @@ static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr) * * Set a range of I/O memory to a given value. 
*/ -static inline void memset_io(volatile void __iomem *addr, int value, - size_t size) -{ - memset(__io_virt(addr), value, size); -} +void memset_io(volatile void __iomem *addr, int val, size_t count); #endif #ifndef memcpy_fromio -#define memcpy_fromio memcpy_fromio /** * memcpy_fromio Copy a block of data from I/O memory * @dst: The (RAM) destination for the copy @@ -1177,16 +1231,10 @@ static inline void memset_io(volatile void __iomem *addr, int value, * * Copy a block of data from I/O memory. */ -static inline void memcpy_fromio(void *buffer, - const volatile void __iomem *addr, - size_t size) -{ - memcpy(buffer, __io_virt(addr), size); -} +void memcpy_fromio(void *dst, const volatile void __iomem *src, size_t count); #endif #ifndef memcpy_toio -#define memcpy_toio memcpy_toio /** * memcpy_toio Copy a block of data into I/O memory * @dst: The (I/O memory) destination for the copy @@ -1195,11 +1243,7 @@ static inline void memcpy_fromio(void *buffer, * * Copy a block of data to I/O memory. */ -static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer, - size_t size) -{ - memcpy(__io_virt(addr), buffer, size); -} +void memcpy_toio(volatile void __iomem *dst, const void *src, size_t count); #endif extern int devmem_is_allowed(unsigned long pfn); diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h index 6796abe1900e..6d1fb6162ac1 100644 --- a/include/asm-generic/memory_model.h +++ b/include/asm-generic/memory_model.h @@ -64,6 +64,19 @@ static inline int pfn_valid(unsigned long pfn) #define page_to_pfn __page_to_pfn #define pfn_to_page __pfn_to_page +#ifdef CONFIG_DEBUG_VIRTUAL +#define page_to_phys(page) \ +({ \ + unsigned long __pfn = page_to_pfn(page); \ + \ + WARN_ON_ONCE(!pfn_valid(__pfn)); \ + PFN_PHYS(__pfn); \ +}) +#else +#define page_to_phys(page) PFN_PHYS(page_to_pfn(page)) +#endif /* CONFIG_DEBUG_VIRTUAL */ +#define phys_to_page(phys) pfn_to_page(PHYS_PFN(phys)) + #endif /* __ASSEMBLY__ */ #endif diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h index 0655aa5b57b2..bf47cca2c375 100644 --- a/include/asm-generic/qspinlock.h +++ b/include/asm-generic/qspinlock.h @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) } #endif +#ifndef __no_arch_spinlock_redefine /* * Remapping spinlock architecture specific functions to the corresponding * queued spinlock functions. @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock) #define arch_spin_lock(l) queued_spin_lock(l) #define arch_spin_trylock(l) queued_spin_trylock(l) #define arch_spin_unlock(l) queued_spin_unlock(l) +#endif #endif /* __ASM_GENERIC_QSPINLOCK_H */ diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h index 90803a826ba0..970590baf61b 100644 --- a/include/asm-generic/spinlock.h +++ b/include/asm-generic/spinlock.h @@ -1,94 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * 'Generic' ticket-lock implementation. - * - * It relies on atomic_fetch_add() having well defined forward progress - * guarantees under contention. If your architecture cannot provide this, stick - * to a test-and-set lock. - * - * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a - * sub-word of the value. This is generally true for anything LL/SC although - * you'd be hard pressed to find anything useful in architecture specifications - * about this. If your architecture cannot do this you might be better off with - * a test-and-set. 
- * - * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence - * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with - * a full fence after the spin to upgrade the otherwise-RCpc - * atomic_cond_read_acquire(). - * - * The implementation uses smp_cond_load_acquire() to spin, so if the - * architecture has WFE like instructions to sleep instead of poll for word - * modifications be sure to implement that (see ARM64 for example). - * - */ - #ifndef __ASM_GENERIC_SPINLOCK_H #define __ASM_GENERIC_SPINLOCK_H -#include <linux/atomic.h> -#include <asm-generic/spinlock_types.h> - -static __always_inline void arch_spin_lock(arch_spinlock_t *lock) -{ - u32 val = atomic_fetch_add(1<<16, lock); - u16 ticket = val >> 16; - - if (ticket == (u16)val) - return; - - /* - * atomic_cond_read_acquire() is RCpc, but rather than defining a - * custom cond_read_rcsc() here we just emit a full fence. We only - * need the prior reads before subsequent writes ordering from - * smb_mb(), but as atomic_cond_read_acquire() just emits reads and we - * have no outstanding writes due to the atomic_fetch_add() the extra - * orderings are free. - */ - atomic_cond_read_acquire(lock, ticket == (u16)VAL); - smp_mb(); -} - -static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock) -{ - u32 old = atomic_read(lock); - - if ((old >> 16) != (old & 0xffff)) - return false; - - return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */ -} - -static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) -{ - u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); - u32 val = atomic_read(lock); - - smp_store_release(ptr, (u16)val + 1); -} - -static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) -{ - u32 val = lock.counter; - - return ((val >> 16) == (val & 0xffff)); -} - -static __always_inline int arch_spin_is_locked(arch_spinlock_t *lock) -{ - arch_spinlock_t val = READ_ONCE(*lock); - - return !arch_spin_value_unlocked(val); -} - -static __always_inline int arch_spin_is_contended(arch_spinlock_t *lock) -{ - u32 val = atomic_read(lock); - - return (s16)((val >> 16) - (val & 0xffff)) > 1; -} - +#include <asm-generic/ticket_spinlock.h> #include <asm/qrwlock.h> #endif /* __ASM_GENERIC_SPINLOCK_H */ diff --git a/include/asm-generic/spinlock_types.h b/include/asm-generic/spinlock_types.h index 8962bb730945..f534aa5de394 100644 --- a/include/asm-generic/spinlock_types.h +++ b/include/asm-generic/spinlock_types.h @@ -3,15 +3,7 @@ #ifndef __ASM_GENERIC_SPINLOCK_TYPES_H #define __ASM_GENERIC_SPINLOCK_TYPES_H -#include <linux/types.h> -typedef atomic_t arch_spinlock_t; - -/* - * qrwlock_types depends on arch_spinlock_t, so we must typedef that before the - * include. 
- */ -#include <asm/qrwlock_types.h> - -#define __ARCH_SPIN_LOCK_UNLOCKED ATOMIC_INIT(0) +#include <asm-generic/qspinlock_types.h> +#include <asm-generic/qrwlock_types.h> #endif /* __ASM_GENERIC_SPINLOCK_TYPES_H */ diff --git a/include/asm-generic/text-patching.h b/include/asm-generic/text-patching.h new file mode 100644 index 000000000000..2245c641b741 --- /dev/null +++ b/include/asm-generic/text-patching.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_TEXT_PATCHING_H +#define _ASM_GENERIC_TEXT_PATCHING_H + +#endif /* _ASM_GENERIC_TEXT_PATCHING_H */ diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h new file mode 100644 index 000000000000..325779970d8a --- /dev/null +++ b/include/asm-generic/ticket_spinlock.h @@ -0,0 +1,105 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * 'Generic' ticket-lock implementation. + * + * It relies on atomic_fetch_add() having well defined forward progress + * guarantees under contention. If your architecture cannot provide this, stick + * to a test-and-set lock. + * + * It also relies on atomic_fetch_add() being safe vs smp_store_release() on a + * sub-word of the value. This is generally true for anything LL/SC although + * you'd be hard pressed to find anything useful in architecture specifications + * about this. If your architecture cannot do this you might be better off with + * a test-and-set. + * + * It further assumes atomic_*_release() + atomic_*_acquire() is RCpc and hence + * uses atomic_fetch_add() which is RCsc to create an RCsc hot path, along with + * a full fence after the spin to upgrade the otherwise-RCpc + * atomic_cond_read_acquire(). + * + * The implementation uses smp_cond_load_acquire() to spin, so if the + * architecture has WFE-like instructions to sleep instead of polling for word + * modifications be sure to implement that (see ARM64 for example). + * + */ + +#ifndef __ASM_GENERIC_TICKET_SPINLOCK_H +#define __ASM_GENERIC_TICKET_SPINLOCK_H + +#include <linux/atomic.h> +#include <asm-generic/spinlock_types.h> + +static __always_inline void ticket_spin_lock(arch_spinlock_t *lock) +{ + u32 val = atomic_fetch_add(1<<16, &lock->val); + u16 ticket = val >> 16; + + if (ticket == (u16)val) + return; + + /* + * atomic_cond_read_acquire() is RCpc, but rather than defining a + * custom cond_read_rcsc() here we just emit a full fence. We only + * need the prior reads before subsequent writes ordering from + * smp_mb(), but as atomic_cond_read_acquire() just emits reads and we + * have no outstanding writes due to the atomic_fetch_add() the extra + * orderings are free.
+ */ + atomic_cond_read_acquire(&lock->val, ticket == (u16)VAL); + smp_mb(); +} + +static __always_inline bool ticket_spin_trylock(arch_spinlock_t *lock) +{ + u32 old = atomic_read(&lock->val); + + if ((old >> 16) != (old & 0xffff)) + return false; + + return atomic_try_cmpxchg(&lock->val, &old, old + (1<<16)); /* SC, for RCsc */ +} + +static __always_inline void ticket_spin_unlock(arch_spinlock_t *lock) +{ + u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN); + u32 val = atomic_read(&lock->val); + + smp_store_release(ptr, (u16)val + 1); +} + +static __always_inline int ticket_spin_value_unlocked(arch_spinlock_t lock) +{ + u32 val = lock.val.counter; + + return ((val >> 16) == (val & 0xffff)); +} + +static __always_inline int ticket_spin_is_locked(arch_spinlock_t *lock) +{ + arch_spinlock_t val = READ_ONCE(*lock); + + return !ticket_spin_value_unlocked(val); +} + +static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock) +{ + u32 val = atomic_read(&lock->val); + + return (s16)((val >> 16) - (val & 0xffff)) > 1; +} + +#ifndef __no_arch_spinlock_redefine +/* + * Remapping spinlock architecture specific functions to the corresponding + * ticket spinlock functions. + */ +#define arch_spin_is_locked(l) ticket_spin_is_locked(l) +#define arch_spin_is_contended(l) ticket_spin_is_contended(l) +#define arch_spin_value_unlocked(l) ticket_spin_value_unlocked(l) +#define arch_spin_lock(l) ticket_spin_lock(l) +#define arch_spin_trylock(l) ticket_spin_trylock(l) +#define arch_spin_unlock(l) ticket_spin_unlock(l) +#endif + +#endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */ diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h index c835607f78ae..01dafd604188 100644 --- a/include/asm-generic/vdso/vsyscall.h +++ b/include/asm-generic/vdso/vsyscall.h @@ -12,8 +12,7 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void) #endif /* __arch_get_k_vdso_data */ #ifndef __arch_update_vsyscall -static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata, - struct timekeeper *tk) +static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata) { } #endif /* __arch_update_vsyscall */ diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h index adf91a783b5c..5dcaf4ae904a 100644 --- a/include/asm-generic/vga.h +++ b/include/asm-generic/vga.h @@ -1,25 +1,4 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* - * Access to VGA videoram - * - * (c) 1998 Martin Mares <mj@ucw.cz> - */ #ifndef __ASM_GENERIC_VGA_H #define __ASM_GENERIC_VGA_H - -/* - * On most architectures that support VGA, we can just - * recalculate addresses and then access the videoram - * directly without any black magic. - * - * Everyone else needs to ioremap the address and use - * proper I/O accesses. - */ -#ifndef VGA_MAP_MEM -#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x) -#endif - -#define vga_readb(x) (*(x)) -#define vga_writeb(x, y) (*(y) = (x)) - -#endif /* _ASM_GENERIC_VGA_H */ +#endif /* __ASM_GENERIC_VGA_H */ diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index eeadbaeccf88..54504013c749 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -95,18 +95,25 @@ * With LTO_CLANG, the linker also splits sections by default, so we need * these macros to combine the sections during the final link. * + * With AUTOFDO_CLANG and PROPELLER_CLANG, by default, the linker splits + * text sections and regroups functions into subsections. 
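Stepping back to ticket_spinlock.h above: the lock word packs the next free ticket into the high 16 bits and the current owner into the low 16, so the lock is free exactly when the two halves are equal. A userspace model of that arithmetic (C11 atomics; the kernel's sub-word release store and RCsc fence subtleties are deliberately simplified away):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint32_t lock;	/* next ticket in bits 31..16, owner in bits 15..0 */

static void ticket_lock(void)
{
	uint32_t val = atomic_fetch_add(&lock, 1u << 16);	/* take a ticket */
	uint16_t ticket = val >> 16;

	while (ticket != (uint16_t)atomic_load(&lock))
		;	/* spin until the owner field reaches our ticket */
}

static void ticket_unlock(void)
{
	atomic_fetch_add(&lock, 1);	/* pass the lock to the next ticket */
}

int main(void)
{
	ticket_lock();
	printf("after lock:   %#x\n", (unsigned)atomic_load(&lock));	/* 0x10000 */
	ticket_unlock();
	printf("after unlock: %#x\n", (unsigned)atomic_load(&lock));	/* 0x10001 */
	return 0;
}

The FIFO hand-off is what gives the ticket lock its fairness; the is_contended test above follows directly, since next minus owner greater than one means waiters are queued.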
+ * + * RODATA_MAIN is not used because existing code already defines .rodata.x * sections to be brought in with rodata. */ -#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) +#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) || \ +defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG) #define TEXT_MAIN .text .text.[0-9a-zA-Z_]* +#else +#define TEXT_MAIN .text +#endif +#if defined(CONFIG_LD_DEAD_CODE_DATA_ELIMINATION) || defined(CONFIG_LTO_CLANG) #define DATA_MAIN .data .data.[0-9a-zA-Z_]* .data..L* .data..compoundliteral* .data.$__unnamed_* .data.$L* #define SDATA_MAIN .sdata .sdata.[0-9a-zA-Z_]* #define RODATA_MAIN .rodata .rodata.[0-9a-zA-Z_]* .rodata..L* #define BSS_MAIN .bss .bss.[0-9a-zA-Z_]* .bss..L* .bss..compoundliteral* #define SBSS_MAIN .sbss .sbss.[0-9a-zA-Z_]* #else -#define TEXT_MAIN .text #define DATA_MAIN .data #define SDATA_MAIN .sdata #define RODATA_MAIN .rodata @@ -350,9 +357,9 @@ *(.data..decrypted) \ *(.ref.data) \ *(.data..shared_aligned) /* percpu related */ \ - *(.data.unlikely) \ + *(.data..unlikely) \ __start_once = .; \ - *(.data.once) \ + *(.data..once) \ __end_once = .; \ STRUCT_ALIGN(); \ *(__tracepoints) \ @@ -549,24 +556,44 @@ __cpuidle_text_end = .; \ __noinstr_text_end = .; +#define TEXT_SPLIT \ + __split_text_start = .; \ + *(.text.split .text.split.[0-9a-zA-Z_]*) \ + __split_text_end = .; + +#define TEXT_UNLIKELY \ + __unlikely_text_start = .; \ + *(.text.unlikely .text.unlikely.*) \ + __unlikely_text_end = .; + +#define TEXT_HOT \ + __hot_text_start = .; \ + *(.text.hot .text.hot.*) \ + __hot_text_end = .; + /* * .text section. Map to function alignment to avoid address changes * during the second ld run when generating System.map * - * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead - * code elimination is enabled, so these sections should be converted - * to use ".." first. + * TEXT_MAIN here will match sections with a fixed pattern (for example, + * .text.hot or .text.unlikely) if dead code elimination or + * function sections are enabled. Match these sections first, before + * TEXT_MAIN, to ensure they are grouped together. + * + * Also, placing the .text.hot section at the beginning of a page helps + * TLB performance. */ #define TEXT_TEXT \ ALIGN_FUNCTION(); \ - *(.text.hot .text.hot.*) \ - *(TEXT_MAIN .text.fixup) \ - *(.text.unlikely .text.unlikely.*) \ + *(.text.asan.* .text.tsan.*) \ *(.text.unknown .text.unknown.*) \ + TEXT_SPLIT \ + TEXT_UNLIKELY \ + . = ALIGN(PAGE_SIZE); \ + TEXT_HOT \ + *(TEXT_MAIN .text.fixup) \ NOINSTR_TEXT \ - *(.ref.text) \ - *(.text.asan.* .text.tsan.*) - + *(.ref.text) /* sched.text is aligned to function alignment to ensure we have the same * address even at the second ld pass when generating System.map */ diff --git a/include/crypto/akcipher.h b/include/crypto/akcipher.h index 18a10cad07aa..cdf7da74bf2f 100644 --- a/include/crypto/akcipher.h +++ b/include/crypto/akcipher.h @@ -12,24 +12,19 @@ #include <linux/crypto.h> /** - * struct akcipher_request - public key request + * struct akcipher_request - public key cipher request * * @base: Common attributes for async crypto requests * @src: Source data - * For verify op this is signature + digest, in that case - * total size of @src is @src_len + @dst_len.
- * @dst: Destination data (Should be NULL for verify op) + * @dst: Destination data * @src_len: Size of the input buffer - * For verify op it's size of signature part of @src, this part - * is supposed to be operated by cipher. - * @dst_len: Size of @dst buffer (for all ops except verify). + * @dst_len: Size of @dst buffer * It needs to be at least as big as the expected result * depending on the operation. * After operation it will be updated with the actual size of the * result. * In case of error where the dst sgl size was insufficient, * it will be updated to the size required for the operation. - * For verify op this is size of digest part in @src. * @__ctx: Start of private context data */ struct akcipher_request { @@ -55,15 +50,8 @@ struct crypto_akcipher { }; /** - * struct akcipher_alg - generic public key algorithm + * struct akcipher_alg - generic public key cipher algorithm * - * @sign: Function performs a sign operation as defined by public key - * algorithm. In case of error, where the dst_len was insufficient, - * the req->dst_len will be updated to the size required for the - * operation - * @verify: Function performs a complete verify operation as defined by - * public key algorithm, returning verification status. Requires - * digest value as input parameter. * @encrypt: Function performs an encrypt operation as defined by public key * algorithm. In case of error, where the dst_len was insufficient, * the req->dst_len will be updated to the size required for the @@ -94,8 +82,6 @@ struct crypto_akcipher { * @base: Common crypto API algorithm data structure */ struct akcipher_alg { - int (*sign)(struct akcipher_request *req); - int (*verify)(struct akcipher_request *req); int (*encrypt)(struct akcipher_request *req); int (*decrypt)(struct akcipher_request *req); int (*set_pub_key)(struct crypto_akcipher *tfm, const void *key, @@ -110,9 +96,9 @@ struct akcipher_alg { }; /** - * DOC: Generic Public Key API + * DOC: Generic Public Key Cipher API * - * The Public Key API is used with the algorithms of type + * The Public Key Cipher API is used with the algorithms of type * CRYPTO_ALG_TYPE_AKCIPHER (listed as type "akcipher" in /proc/crypto) */ @@ -243,10 +229,9 @@ static inline void akcipher_request_set_callback(struct akcipher_request *req, * * @req: public key request * @src: ptr to input scatter list - * @dst: ptr to output scatter list or NULL for verify op + * @dst: ptr to output scatter list * @src_len: size of the src input scatter list to be processed - * @dst_len: size of the dst output scatter list or size of signature - * portion in @src for verify op + * @dst_len: size of the dst output scatter list */ static inline void akcipher_request_set_crypt(struct akcipher_request *req, struct scatterlist *src, @@ -348,44 +333,6 @@ int crypto_akcipher_sync_decrypt(struct crypto_akcipher *tfm, void *dst, unsigned int dlen); /** - * crypto_akcipher_sign() - Invoke public key sign operation - * - * Function invokes the specific public key sign operation for a given - * public key algorithm - * - * @req: asymmetric key request - * - * Return: zero on success; error code in case of error - */ -static inline int crypto_akcipher_sign(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - return crypto_akcipher_alg(tfm)->sign(req); -} - -/** - * crypto_akcipher_verify() - Invoke public key signature verification - * - * Function invokes the specific public key signature verification operation - * for a given public key algorithm. 
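With sign and verify removed, akcipher becomes a pure encryption/decryption interface. A hedged sketch of what a caller of the remaining operations looks like (raw, unpadded RSA purely for illustration; the sync helper is assumed to be available alongside crypto_akcipher_sync_decrypt() declared nearby, and error handling is abbreviated):

#include <crypto/akcipher.h>
#include <linux/err.h>

static int rsa_encrypt_example(const void *pub, unsigned int publen,
			       const void *in, unsigned int inlen,
			       void *out, unsigned int outlen)
{
	struct crypto_akcipher *tfm;
	int ret;

	tfm = crypto_alloc_akcipher("rsa", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_akcipher_set_pub_key(tfm, pub, publen);
	if (!ret)
		ret = crypto_akcipher_sync_encrypt(tfm, in, inlen, out, outlen);

	crypto_free_akcipher(tfm);
	return ret;
}

Signature generation and verification move to the sig API shown further below in this series.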
- * - * @req: asymmetric key request - * - * Note: req->dst should be NULL, req->src should point to SG of size - * (req->src_size + req->dst_size), containing signature (of req->src_size - * length) with appended digest (of req->dst_size length). - * - * Return: zero on verification success; error code in case of error. - */ -static inline int crypto_akcipher_verify(struct akcipher_request *req) -{ - struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); - - return crypto_akcipher_alg(tfm)->verify(req); -} - -/** * crypto_akcipher_set_pub_key() - Invoke set public key operation * * Function invokes the algorithm specific set key function, which knows diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h index a0fba4b2eccf..14ee62bc52b6 100644 --- a/include/crypto/internal/akcipher.h +++ b/include/crypto/internal/akcipher.h @@ -124,7 +124,7 @@ static inline struct akcipher_alg *crypto_spawn_akcipher_alg( /** * crypto_register_akcipher() -- Register public key algorithm * - * Function registers an implementation of a public key verify algorithm + * Function registers an implementation of a public key cipher algorithm * * @alg: algorithm definition * @@ -135,7 +135,7 @@ int crypto_register_akcipher(struct akcipher_alg *alg); /** * crypto_unregister_akcipher() -- Unregister public key algorithm * - * Function unregisters an implementation of a public key verify algorithm + * Function unregisters an implementation of a public key cipher algorithm * * @alg: algorithm definition */ diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h index 065f00e4bf40..57cd75242141 100644 --- a/include/crypto/internal/ecc.h +++ b/include/crypto/internal/ecc.h @@ -42,6 +42,18 @@ #define ECC_POINT_INIT(x, y, ndigits) (struct ecc_point) { x, y, ndigits } +/* + * The integers r and s making up the signature are expected to be + * formatted as two consecutive u64 arrays of size ECC_MAX_BYTES. + * The bytes within each u64 digit are in native endianness, + * but the order of the u64 digits themselves is little endian. + * This format allows direct use by internal vli_*() functions. 
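To make the digit layout described above concrete: bytes of a big-endian integer map into little-endian-ordered u64 digits, each digit holding its bytes in native order. A standalone sketch (the helper here is hypothetical; the kernel provides its own ecc_swap_digits()/ecc_digits_from_bytes() for this purpose):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper illustrating the layout used by the vli_*() code */
static void digits_from_be_bytes(const uint8_t *in, uint64_t *out,
				 unsigned int ndigits)
{
	for (unsigned int i = 0; i < ndigits; i++) {
		uint64_t d = 0;

		for (unsigned int j = 0; j < 8; j++)
			d = (d << 8) | in[(ndigits - 1 - i) * 8 + j];
		out[i] = d;	/* out[0] is the least significant digit */
	}
}

int main(void)
{
	uint8_t be[16];			/* big-endian value 0x0102...0f10 */
	uint64_t digits[2];

	for (int i = 0; i < 16; i++)
		be[i] = i + 1;
	digits_from_be_bytes(be, digits, 2);
	printf("lo=%016llx hi=%016llx\n",
	       (unsigned long long)digits[0],
	       (unsigned long long)digits[1]);
	/* prints lo=090a0b0c0d0e0f10 hi=0102030405060708 */
	return 0;
}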
+ */ +struct ecdsa_raw_sig { + u64 r[ECC_MAX_DIGITS]; + u64 s[ECC_MAX_DIGITS]; +}; + /** * ecc_swap_digits() - Copy ndigits from big endian array to native array * @in: Input array @@ -293,4 +305,6 @@ void ecc_point_mult_shamir(const struct ecc_point *result, const u64 *y, const struct ecc_point *q, const struct ecc_curve *curve); +extern struct crypto_template ecdsa_x962_tmpl; +extern struct crypto_template ecdsa_p1363_tmpl; #endif diff --git a/include/crypto/internal/rsa.h b/include/crypto/internal/rsa.h index e870133f4b77..071a1951b992 100644 --- a/include/crypto/internal/rsa.h +++ b/include/crypto/internal/rsa.h @@ -8,6 +8,7 @@ #ifndef _RSA_HELPER_ #define _RSA_HELPER_ #include <linux/types.h> +#include <crypto/akcipher.h> /** * rsa_key - RSA key structure @@ -53,5 +54,33 @@ int rsa_parse_pub_key(struct rsa_key *rsa_key, const void *key, int rsa_parse_priv_key(struct rsa_key *rsa_key, const void *key, unsigned int key_len); +#define RSA_PUB (true) +#define RSA_PRIV (false) + +static inline int rsa_set_key(struct crypto_akcipher *child, + unsigned int *key_size, bool is_pubkey, + const void *key, unsigned int keylen) +{ + int err; + + *key_size = 0; + + if (is_pubkey) + err = crypto_akcipher_set_pub_key(child, key, keylen); + else + err = crypto_akcipher_set_priv_key(child, key, keylen); + if (err) + return err; + + /* Find out new modulus size from rsa implementation */ + err = crypto_akcipher_maxsize(child); + if (err > PAGE_SIZE) + return -ENOTSUPP; + + *key_size = err; + return 0; +} + extern struct crypto_template rsa_pkcs1pad_tmpl; +extern struct crypto_template rsassa_pkcs1_tmpl; #endif diff --git a/include/crypto/internal/sig.h b/include/crypto/internal/sig.h index 97cb26ef8115..b16648c1a986 100644 --- a/include/crypto/internal/sig.h +++ b/include/crypto/internal/sig.h @@ -10,8 +10,88 @@ #include <crypto/algapi.h> #include <crypto/sig.h> +struct sig_instance { + void (*free)(struct sig_instance *inst); + union { + struct { + char head[offsetof(struct sig_alg, base)]; + struct crypto_instance base; + }; + struct sig_alg alg; + }; +}; + +struct crypto_sig_spawn { + struct crypto_spawn base; +}; + static inline void *crypto_sig_ctx(struct crypto_sig *tfm) { return crypto_tfm_ctx(&tfm->base); } + +/** + * crypto_register_sig() -- Register public key signature algorithm + * + * Function registers an implementation of a public key signature algorithm + * + * @alg: algorithm definition + * + * Return: zero on success; error code in case of error + */ +int crypto_register_sig(struct sig_alg *alg); + +/** + * crypto_unregister_sig() -- Unregister public key signature algorithm + * + * Function unregisters an implementation of a public key signature algorithm + * + * @alg: algorithm definition + */ +void crypto_unregister_sig(struct sig_alg *alg); + +int sig_register_instance(struct crypto_template *tmpl, + struct sig_instance *inst); + +static inline struct sig_instance *sig_instance(struct crypto_instance *inst) +{ + return container_of(&inst->alg, struct sig_instance, alg.base); +} + +static inline struct sig_instance *sig_alg_instance(struct crypto_sig *tfm) +{ + return sig_instance(crypto_tfm_alg_instance(&tfm->base)); +} + +static inline struct crypto_instance *sig_crypto_instance(struct sig_instance + *inst) +{ + return container_of(&inst->alg.base, struct crypto_instance, alg); +} + +static inline void *sig_instance_ctx(struct sig_instance *inst) +{ + return crypto_instance_ctx(sig_crypto_instance(inst)); +} + +int crypto_grab_sig(struct crypto_sig_spawn *spawn, + struct 
crypto_instance *inst, + const char *name, u32 type, u32 mask); + +static inline struct crypto_sig *crypto_spawn_sig(struct crypto_sig_spawn + *spawn) +{ + return crypto_spawn_tfm2(&spawn->base); +} + +static inline void crypto_drop_sig(struct crypto_sig_spawn *spawn) +{ + crypto_drop_spawn(&spawn->base); +} + +static inline struct sig_alg *crypto_spawn_sig_alg(struct crypto_sig_spawn + *spawn) +{ + return container_of(spawn->base.alg, struct sig_alg, base); +} #endif diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h index b7f308977c84..81098e00c08f 100644 --- a/include/crypto/public_key.h +++ b/include/crypto/public_key.h @@ -104,9 +104,6 @@ static inline int restrict_link_by_digsig(struct key *dest_keyring, extern int query_asymmetric_key(const struct kernel_pkey_params *, struct kernel_pkey_query *); -extern int encrypt_blob(struct kernel_pkey_params *, const void *, void *); -extern int decrypt_blob(struct kernel_pkey_params *, const void *, void *); -extern int create_signature(struct kernel_pkey_params *, const void *, void *); extern int verify_signature(const struct key *, const struct public_key_signature *); diff --git a/include/crypto/sig.h b/include/crypto/sig.h index d25186bb2be3..cff41ad93824 100644 --- a/include/crypto/sig.h +++ b/include/crypto/sig.h @@ -20,6 +20,56 @@ struct crypto_sig { }; /** + * struct sig_alg - generic public key signature algorithm + * + * @sign: Function performs a sign operation as defined by public key + * algorithm. Optional. + * @verify: Function performs a complete verify operation as defined by + * public key algorithm, returning verification status. Optional. + * @set_pub_key: Function invokes the algorithm specific set public key + * function, which knows how to decode and interpret + * the BER encoded public key and parameters. Mandatory. + * @set_priv_key: Function invokes the algorithm specific set private key + * function, which knows how to decode and interpret + * the BER encoded private key and parameters. Optional. + * @key_size: Function returns key size. Mandatory. + * @digest_size: Function returns maximum digest size. Optional. + * @max_size: Function returns maximum signature size. Optional. + * @init: Initialize the cryptographic transformation object. + * This function is used to initialize the cryptographic + * transformation object. This function is called only once at + * the instantiation time, right after the transformation context + * was allocated. In case the cryptographic hardware has some + * special requirements which need to be handled by software, this + * function shall check for the precise requirement of the + * transformation and put any software fallbacks in place. + * @exit: Deinitialize the cryptographic transformation object. This is a + * counterpart to @init, used to remove various changes set in + * @init. 
+ * + * @base: Common crypto API algorithm data structure + */ +struct sig_alg { + int (*sign)(struct crypto_sig *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen); + int (*verify)(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen); + int (*set_pub_key)(struct crypto_sig *tfm, + const void *key, unsigned int keylen); + int (*set_priv_key)(struct crypto_sig *tfm, + const void *key, unsigned int keylen); + unsigned int (*key_size)(struct crypto_sig *tfm); + unsigned int (*digest_size)(struct crypto_sig *tfm); + unsigned int (*max_size)(struct crypto_sig *tfm); + int (*init)(struct crypto_sig *tfm); + void (*exit)(struct crypto_sig *tfm); + + struct crypto_alg base; +}; + +/** * DOC: Generic Public Key Signature API * * The Public Key Signature API is used with the algorithms of type @@ -47,6 +97,21 @@ static inline struct crypto_tfm *crypto_sig_tfm(struct crypto_sig *tfm) return &tfm->base; } +static inline struct crypto_sig *__crypto_sig_tfm(struct crypto_tfm *tfm) +{ + return container_of(tfm, struct crypto_sig, base); +} + +static inline struct sig_alg *__crypto_sig_alg(struct crypto_alg *alg) +{ + return container_of(alg, struct sig_alg, base); +} + +static inline struct sig_alg *crypto_sig_alg(struct crypto_sig *tfm) +{ + return __crypto_sig_alg(crypto_sig_tfm(tfm)->__crt_alg); +} + /** * crypto_free_sig() - free signature tfm handle * @@ -60,16 +125,55 @@ static inline void crypto_free_sig(struct crypto_sig *tfm) } /** - * crypto_sig_maxsize() - Get len for output buffer + * crypto_sig_keysize() - Get key size + * + * Function returns the key size in bytes. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you may end up + * in a NULL dereference. + * + * @tfm: signature tfm handle allocated with crypto_alloc_sig() + */ +static inline unsigned int crypto_sig_keysize(struct crypto_sig *tfm) +{ + struct sig_alg *alg = crypto_sig_alg(tfm); + + return alg->key_size(tfm); +} + +/** + * crypto_sig_digestsize() - Get maximum digest size + * + * Function returns the maximum digest size in bytes. + * Function assumes that the key is already set in the transformation. If this + * function is called without a setkey or with a failed setkey, you may end up + * in a NULL dereference. + * + * @tfm: signature tfm handle allocated with crypto_alloc_sig() + */ +static inline unsigned int crypto_sig_digestsize(struct crypto_sig *tfm) +{ + struct sig_alg *alg = crypto_sig_alg(tfm); + + return alg->digest_size(tfm); +} + +/** + * crypto_sig_maxsize() - Get maximum signature size * - * Function returns the dest buffer size required for a given key. + * Function returns the maximum signature size in bytes. * Function assumes that the key is already set in the transformation. If this - * function is called without a setkey or with a failed setkey, you will end up + * function is called without a setkey or with a failed setkey, you may end up * in a NULL dereference. 
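Putting the new sig_alg interface together, a hedged sketch of a complete verification call through the tfm wrappers defined below ("ecdsa-nist-p256" is assumed to name an available implementation, and error handling is abbreviated):

#include <crypto/sig.h>
#include <linux/err.h>

static int verify_p256_example(const void *key, unsigned int keylen,
			       const void *sig, unsigned int siglen,
			       const void *digest, unsigned int digestlen)
{
	struct crypto_sig *tfm;
	int ret;

	tfm = crypto_alloc_sig("ecdsa-nist-p256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_sig_set_pubkey(tfm, key, keylen);
	if (!ret)
		ret = crypto_sig_verify(tfm, sig, siglen, digest, digestlen);

	crypto_free_sig(tfm);
	return ret;
}

Note the contrast with the removed akcipher verify: signature and digest are now separate, explicitly sized buffers rather than a combined scatterlist.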
* * @tfm: signature tfm handle allocated with crypto_alloc_sig() */ -int crypto_sig_maxsize(struct crypto_sig *tfm); +static inline unsigned int crypto_sig_maxsize(struct crypto_sig *tfm) +{ + struct sig_alg *alg = crypto_sig_alg(tfm); + + return alg->max_size(tfm); +} /** * crypto_sig_sign() - Invoke signing operation @@ -84,9 +188,14 @@ int crypto_sig_maxsize(struct crypto_sig *tfm); * * Return: zero on success; error code in case of error */ -int crypto_sig_sign(struct crypto_sig *tfm, - const void *src, unsigned int slen, - void *dst, unsigned int dlen); +static inline int crypto_sig_sign(struct crypto_sig *tfm, + const void *src, unsigned int slen, + void *dst, unsigned int dlen) +{ + struct sig_alg *alg = crypto_sig_alg(tfm); + + return alg->sign(tfm, src, slen, dst, dlen); +} /** * crypto_sig_verify() - Invoke signature verification @@ -102,9 +211,14 @@ int crypto_sig_sign(struct crypto_sig *tfm, * * Return: zero on verification success; error code in case of error. */ -int crypto_sig_verify(struct crypto_sig *tfm, - const void *src, unsigned int slen, - const void *digest, unsigned int dlen); +static inline int crypto_sig_verify(struct crypto_sig *tfm, + const void *src, unsigned int slen, + const void *digest, unsigned int dlen) +{ + struct sig_alg *alg = crypto_sig_alg(tfm); + + return alg->verify(tfm, src, slen, digest, dlen); +} /** * crypto_sig_set_pubkey() - Invoke set public key operation @@ -119,8 +233,13 @@ int crypto_sig_verify(struct crypto_sig *tfm, * * Return: zero on success; error code in case of error */ -int crypto_sig_set_pubkey(struct crypto_sig *tfm, - const void *key, unsigned int keylen); +static inline int crypto_sig_set_pubkey(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct sig_alg *alg = crypto_sig_alg(tfm); + + return alg->set_pub_key(tfm, key, keylen); +} /** * crypto_sig_set_privkey() - Invoke set private key operation @@ -135,6 +254,11 @@ int crypto_sig_set_pubkey(struct crypto_sig *tfm, * * Return: zero on success; error code in case of error */ -int crypto_sig_set_privkey(struct crypto_sig *tfm, - const void *key, unsigned int keylen); +static inline int crypto_sig_set_privkey(struct crypto_sig *tfm, + const void *key, unsigned int keylen) +{ + struct sig_alg *alg = crypto_sig_alg(tfm); + + return alg->set_priv_key(tfm, key, keylen); +} #endif diff --git a/include/drm/bridge/dw_hdmi_qp.h b/include/drm/bridge/dw_hdmi_qp.h new file mode 100644 index 000000000000..e9be6d507ad9 --- /dev/null +++ b/include/drm/bridge/dw_hdmi_qp.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (c) 2021-2022 Rockchip Electronics Co., Ltd. + * Copyright (c) 2024 Collabora Ltd. 
+ */ + +#ifndef __DW_HDMI_QP__ +#define __DW_HDMI_QP__ + +struct device; +struct drm_encoder; +struct dw_hdmi_qp; +struct platform_device; + +struct dw_hdmi_qp_phy_ops { + int (*init)(struct dw_hdmi_qp *hdmi, void *data); + void (*disable)(struct dw_hdmi_qp *hdmi, void *data); + enum drm_connector_status (*read_hpd)(struct dw_hdmi_qp *hdmi, void *data); + void (*setup_hpd)(struct dw_hdmi_qp *hdmi, void *data); +}; + +struct dw_hdmi_qp_plat_data { + const struct dw_hdmi_qp_phy_ops *phy_ops; + void *phy_data; + int main_irq; +}; + +struct dw_hdmi_qp *dw_hdmi_qp_bind(struct platform_device *pdev, + struct drm_encoder *encoder, + const struct dw_hdmi_qp_plat_data *plat_data); +void dw_hdmi_qp_resume(struct device *dev, struct dw_hdmi_qp *hdmi); +#endif /* __DW_HDMI_QP__ */ diff --git a/include/drm/bridge/imx.h b/include/drm/bridge/imx.h new file mode 100644 index 000000000000..b93f719fe0e7 --- /dev/null +++ b/include/drm/bridge/imx.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright (C) 2012 Sascha Hauer, Pengutronix + */ + +#ifndef DRM_IMX_BRIDGE_H +#define DRM_IMX_BRIDGE_H + +struct device; +struct device_node; +struct drm_bridge; + +struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev, + struct device_node *np, + int type); + +#endif diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h deleted file mode 100644 index cbe33b49fd5d..000000000000 --- a/include/drm/drm_aperture.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: MIT */ - -#ifndef _DRM_APERTURE_H_ -#define _DRM_APERTURE_H_ - -#include <linux/types.h> - -struct drm_device; -struct drm_driver; -struct pci_dev; - -int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base, - resource_size_t size); - -int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, - const struct drm_driver *req_driver); - -int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, - const struct drm_driver *req_driver); - -/** - * drm_aperture_remove_framebuffers - remove all existing framebuffers - * @req_driver: requesting DRM driver - * - * This function removes all graphics device drivers. Use this function on systems - * that can have their framebuffer located anywhere in memory. - * - * Returns: - * 0 on success, or a negative errno code otherwise - */ -static inline int -drm_aperture_remove_framebuffers(const struct drm_driver *req_driver) -{ - return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, - req_driver); -} - -#endif diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 75019d16be64..e8d735b7f6a4 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -803,6 +803,11 @@ struct drm_bridge { */ bool interlace_allowed; /** + * @ycbcr_420_allowed: Indicate that the bridge can handle YCbCr 420 + * output. + */ + bool ycbcr_420_allowed; + /** * @pre_enable_prev_first: The bridge requires that the prev * bridge @pre_enable function is called before its @pre_enable, * and conversely for post_disable. This is most frequently a diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h index bc0e66f9c425..3b13cf29ed55 100644 --- a/include/drm/drm_client.h +++ b/include/drm/drm_client.h @@ -63,6 +63,34 @@ struct drm_client_funcs { * This callback is optional. */ int (*hotplug)(struct drm_client_dev *client); + + /** + * @suspend: + * + * Called when suspending the device. + * + * This callback is optional. 
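A minimal sketch of a client filling in the new suspend/resume hooks (the my_* names are invented; see the console-lock FIXME that follows for why the lock state is passed in):

#include <drm/drm_client.h>
#include <linux/module.h>

static int my_client_suspend(struct drm_client_dev *client, bool holds_console_lock)
{
	/* quiesce the client's output here */
	return 0;
}

static int my_client_resume(struct drm_client_dev *client, bool holds_console_lock)
{
	/* restore the client's output here */
	return 0;
}

static const struct drm_client_funcs my_client_funcs = {
	.owner   = THIS_MODULE,
	.suspend = my_client_suspend,
	.resume  = my_client_resume,
};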
+ * + * FIXME: Some callers hold the console lock when invoking this + * function. This interferes with fbdev emulation, which + * also tries to acquire the lock. Push the console lock + * into the callback and remove 'holds_console_lock'. + */ + int (*suspend)(struct drm_client_dev *client, bool holds_console_lock); + + /** + * @resume: + * + * Called when resuming the device from suspend. + * + * This callback is optional. + * + * FIXME: Some callers hold the console lock when invoking this + * function. This interferes with fbdev emulation, which + * also tries to acquire the lock. Push the console lock + * into the callback and remove 'holds_console_lock'. + */ + int (*resume)(struct drm_client_dev *client, bool holds_console_lock); }; /** @@ -108,6 +136,13 @@ struct drm_client_dev { struct drm_mode_set *modesets; /** + * @suspended: + * + * The client has been suspended. + */ + bool suspended; + + /** * @hotplug_failed: * * Set by client hotplug helpers if the hotplugging failed @@ -121,10 +156,6 @@ int drm_client_init(struct drm_device *dev, struct drm_client_dev *client, void drm_client_release(struct drm_client_dev *client); void drm_client_register(struct drm_client_dev *client); -void drm_client_dev_unregister(struct drm_device *dev); -void drm_client_dev_hotplug(struct drm_device *dev); -void drm_client_dev_restore(struct drm_device *dev); - /** * struct drm_client_buffer - DRM client buffer */ @@ -205,6 +236,4 @@ int drm_client_modeset_dpms(struct drm_client_dev *client, int mode); drm_for_each_connector_iter(connector, iter) \ if (connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK) -void drm_client_debugfs_init(struct drm_device *dev); - #endif diff --git a/include/drm/drm_client_event.h b/include/drm/drm_client_event.h new file mode 100644 index 000000000000..99863554b055 --- /dev/null +++ b/include/drm/drm_client_event.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 or MIT */ + +#ifndef _DRM_CLIENT_EVENT_H_ +#define _DRM_CLIENT_EVENT_H_ + +struct drm_device; + +#if defined(CONFIG_DRM_CLIENT) +void drm_client_dev_unregister(struct drm_device *dev); +void drm_client_dev_hotplug(struct drm_device *dev); +void drm_client_dev_restore(struct drm_device *dev); +void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock); +void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock); +#else +static inline void drm_client_dev_unregister(struct drm_device *dev) +{ } +static inline void drm_client_dev_hotplug(struct drm_device *dev) +{ } +static inline void drm_client_dev_restore(struct drm_device *dev) +{ } +static inline void drm_client_dev_suspend(struct drm_device *dev, bool holds_console_lock) +{ } +static inline void drm_client_dev_resume(struct drm_device *dev, bool holds_console_lock) +{ } +#endif + +#endif diff --git a/include/drm/drm_client_setup.h b/include/drm/drm_client_setup.h new file mode 100644 index 000000000000..46aab3fb46be --- /dev/null +++ b/include/drm/drm_client_setup.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_CLIENT_SETUP_H +#define DRM_CLIENT_SETUP_H + +#include <linux/types.h> + +struct drm_device; +struct drm_format_info; + +#if defined(CONFIG_DRM_CLIENT_SETUP) +void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format); +void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc); +void drm_client_setup_with_color_mode(struct drm_device *dev, unsigned int color_mode); +#else +static inline void drm_client_setup(struct drm_device *dev, + const struct 
drm_format_info *format) +{ } +static inline void drm_client_setup_with_fourcc(struct drm_device *dev, u32 fourcc) +{ } +static inline void drm_client_setup_with_color_mode(struct drm_device *dev, + unsigned int color_mode) +{ } +#endif + +#endif diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h index 02ea4e3248fd..1bbbcb8e2d23 100644 --- a/include/drm/drm_drv.h +++ b/include/drm/drm_drv.h @@ -34,6 +34,8 @@ #include <drm/drm_device.h> +struct drm_fb_helper; +struct drm_fb_helper_surface_size; struct drm_file; struct drm_gem_object; struct drm_master; @@ -367,6 +369,22 @@ struct drm_driver { uint64_t *offset); /** + * @fbdev_probe: + * + * Allocates and initializes the fb_info structure for fbdev emulation. + * Furthermore it also needs to allocate the DRM framebuffer used to + * back the fbdev. + * + * This callback is mandatory for fbdev support. + * + * Returns: + * + * 0 on success or a negative error code otherwise. + */ + int (*fbdev_probe)(struct drm_fb_helper *fbdev_helper, + struct drm_fb_helper_surface_size *sizes); + + /** * @show_fdinfo: * * Print device specific fdinfo. See Documentation/gpu/drm-usage-stats.rst. diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h index 699f2790b9ac..8426b9921a03 100644 --- a/include/drm/drm_fb_helper.h +++ b/include/drm/drm_fb_helper.h @@ -256,7 +256,9 @@ void drm_fb_helper_fill_info(struct fb_info *info, void drm_fb_helper_damage_range(struct fb_info *info, off_t off, size_t len); void drm_fb_helper_damage_area(struct fb_info *info, u32 x, u32 y, u32 width, u32 height); +#ifdef CONFIG_FB_DEFERRED_IO void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagereflist); +#endif void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend); void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper, @@ -361,10 +363,12 @@ static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd, return 0; } +#ifdef CONFIG_FB_DEFERRED_IO static inline void drm_fb_helper_deferred_io(struct fb_info *info, struct list_head *pagelist) { } +#endif static inline void drm_fb_helper_set_suspend(struct drm_fb_helper *fb_helper, bool suspend) diff --git a/include/drm/drm_fbdev_client.h b/include/drm/drm_fbdev_client.h new file mode 100644 index 000000000000..e11a5614f127 --- /dev/null +++ b/include/drm/drm_fbdev_client.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef DRM_FBDEV_CLIENT_H +#define DRM_FBDEV_CLIENT_H + +struct drm_device; +struct drm_format_info; + +#ifdef CONFIG_DRM_FBDEV_EMULATION +int drm_fbdev_client_setup(struct drm_device *dev, const struct drm_format_info *format); +#else +static inline int drm_fbdev_client_setup(struct drm_device *dev, + const struct drm_format_info *format) +{ + return 0; +} +#endif + +#endif diff --git a/include/drm/drm_fbdev_dma.h b/include/drm/drm_fbdev_dma.h index 2da7ee784133..fb3f2a9aa01a 100644 --- a/include/drm/drm_fbdev_dma.h +++ b/include/drm/drm_fbdev_dma.h @@ -3,13 +3,18 @@ #ifndef DRM_FBDEV_DMA_H #define DRM_FBDEV_DMA_H -struct drm_device; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; #ifdef CONFIG_DRM_FBDEV_EMULATION -void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp); +int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); + +#define DRM_FBDEV_DMA_DRIVER_OPS \ + .fbdev_probe = drm_fbdev_dma_driver_fbdev_probe #else -static inline void drm_fbdev_dma_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ } +#define 
DRM_FBDEV_DMA_DRIVER_OPS \ + .fbdev_probe = NULL #endif #endif diff --git a/include/drm/drm_fbdev_shmem.h b/include/drm/drm_fbdev_shmem.h index fb43cadd1950..2fc708964d75 100644 --- a/include/drm/drm_fbdev_shmem.h +++ b/include/drm/drm_fbdev_shmem.h @@ -3,13 +3,18 @@ #ifndef DRM_FBDEV_SHMEM_H #define DRM_FBDEV_SHMEM_H -struct drm_device; +struct drm_fb_helper; +struct drm_fb_helper_surface_size; #ifdef CONFIG_DRM_FBDEV_EMULATION -void drm_fbdev_shmem_setup(struct drm_device *dev, unsigned int preferred_bpp); +int drm_fbdev_shmem_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); + +#define DRM_FBDEV_SHMEM_DRIVER_OPS \ + .fbdev_probe = drm_fbdev_shmem_driver_fbdev_probe #else -static inline void drm_fbdev_shmem_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ } +#define DRM_FBDEV_SHMEM_DRIVER_OPS \ + .fbdev_probe = NULL #endif #endif diff --git a/include/drm/drm_fbdev_ttm.h b/include/drm/drm_fbdev_ttm.h index 9e6c3bdf3537..ad4a10bb4c78 100644 --- a/include/drm/drm_fbdev_ttm.h +++ b/include/drm/drm_fbdev_ttm.h @@ -3,13 +3,20 @@ #ifndef DRM_FBDEV_TTM_H #define DRM_FBDEV_TTM_H -struct drm_device; +#include <linux/stddef.h> + +struct drm_fb_helper; +struct drm_fb_helper_surface_size; #ifdef CONFIG_DRM_FBDEV_EMULATION -void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp); +int drm_fbdev_ttm_driver_fbdev_probe(struct drm_fb_helper *fb_helper, + struct drm_fb_helper_surface_size *sizes); + +#define DRM_FBDEV_TTM_DRIVER_OPS \ + .fbdev_probe = drm_fbdev_ttm_driver_fbdev_probe #else -static inline void drm_fbdev_ttm_setup(struct drm_device *dev, unsigned int preferred_bpp) -{ } +#define DRM_FBDEV_TTM_DRIVER_OPS \ + .fbdev_probe = NULL #endif #endif diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h index 8c0030c77308..f0ef32e9fa5e 100644 --- a/include/drm/drm_file.h +++ b/include/drm/drm_file.h @@ -388,6 +388,18 @@ struct drm_file { * Per-file buffer caches used by the PRIME buffer sharing code. */ struct drm_prime_file_private prime; + + /** + * @client_name: + * + * Userspace-provided name; useful for accounting and debugging. + */ + const char *client_name; + + /** + * @client_name_lock: Protects @client_name. 
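
The @client_name field above comes with an explicit locking rule: @client_name_lock must be held around every access. A minimal sketch of a conforming reader, assuming a hypothetical fdinfo-style helper (show_client_name and the output key are illustrative, not taken from the patch):

	#include <linux/mutex.h>
	#include <linux/seq_file.h>
	#include <drm/drm_file.h>

	/* Hypothetical helper: print the userspace-provided name, if any. */
	static void show_client_name(struct seq_file *m, struct drm_file *file)
	{
		mutex_lock(&file->client_name_lock);
		if (file->client_name)
			seq_printf(m, "drm-client-name:\t%s\n", file->client_name);
		mutex_unlock(&file->client_name_lock);
	}
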
+ */ + struct mutex client_name_lock; }; /** diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h index ccf91daa4307..c3f4405d6662 100644 --- a/include/drm/drm_fourcc.h +++ b/include/drm/drm_fourcc.h @@ -313,6 +313,7 @@ drm_get_format_info(struct drm_device *dev, uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth); uint32_t drm_driver_legacy_fb_format(struct drm_device *dev, uint32_t bpp, uint32_t depth); +uint32_t drm_driver_color_mode_format(struct drm_device *dev, unsigned int color_mode); unsigned int drm_format_info_block_width(const struct drm_format_info *info, int plane); unsigned int drm_format_info_block_height(const struct drm_format_info *info, diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index d8b86df2ec0d..5b8b1b059d32 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -473,6 +473,9 @@ void drm_gem_object_release(struct drm_gem_object *obj); void drm_gem_object_free(struct kref *kref); int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); +int drm_gem_object_init_with_mnt(struct drm_device *dev, + struct drm_gem_object *obj, size_t size, + struct vfsmount *gemfs); void drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); void drm_gem_private_object_fini(struct drm_gem_object *obj); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index efbc9f27312b..d22e3fb53631 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -97,6 +97,9 @@ struct drm_gem_shmem_object { container_of(obj, struct drm_gem_shmem_object, base) struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); +struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev, + size_t size, + struct vfsmount *gemfs); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h index 9a73f786f4ad..00830b49a3ff 100644 --- a/include/drm/drm_gem_vram_helper.h +++ b/include/drm/drm_gem_vram_helper.h @@ -17,7 +17,6 @@ struct drm_mode_create_dumb; struct drm_plane; struct drm_plane_state; -struct drm_simple_display_pipe; struct filp; struct vm_area_struct; @@ -137,18 +136,6 @@ drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, \ .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb -/* - * Helpers for struct drm_simple_display_pipe_funcs - */ - -int drm_gem_vram_simple_display_pipe_prepare_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *new_state); - -void drm_gem_vram_simple_display_pipe_cleanup_fb( - struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state); - /** * define DRM_GEM_VRAM_DRIVER - default callback functions for * &struct drm_driver diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h index f725f8654611..94400a78031f 100644 --- a/include/drm/drm_mipi_dsi.h +++ b/include/drm/drm_mipi_dsi.h @@ -280,6 +280,8 @@ void mipi_dsi_compression_mode_ext_multi(struct mipi_dsi_multi_context *ctx, bool enable, enum mipi_dsi_compression_algo algo, unsigned int pps_selector); +void mipi_dsi_compression_mode_multi(struct mipi_dsi_multi_context *ctx, + bool enable); void mipi_dsi_picture_parameter_set_multi(struct mipi_dsi_multi_context *ctx, const struct drm_dsc_picture_parameter_set *pps); diff --git 
a/include/drm/drm_of.h b/include/drm/drm_of.h index 02d1cdd7f798..7f0256dae3f1 100644 --- a/include/drm/drm_of.h +++ b/include/drm/drm_of.h @@ -52,6 +52,8 @@ int drm_of_find_panel_or_bridge(const struct device_node *np, struct drm_bridge **bridge); int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, const struct device_node *port2); +int drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1, + struct device_node *port2); int drm_of_lvds_get_data_mapping(const struct device_node *port); int drm_of_get_data_lanes_count(const struct device_node *endpoint, const unsigned int min, const unsigned int max); @@ -110,6 +112,13 @@ drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1, } static inline int +drm_of_lvds_get_dual_link_pixel_order_sink(struct device_node *port1, + struct device_node *port2) +{ + return -EINVAL; +} + +static inline int drm_of_lvds_get_data_mapping(const struct device_node *port) { return -EINVAL; diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h index 54085d5d05c3..f4e1fa9ae607 100644 --- a/include/drm/drm_panic.h +++ b/include/drm/drm_panic.h @@ -64,6 +64,8 @@ struct drm_scanout_buffer { }; +#ifdef CONFIG_DRM_PANIC + /** * drm_panic_trylock - try to enter the panic printing critical section * @dev: struct drm_device @@ -149,4 +151,16 @@ struct drm_scanout_buffer { #define drm_panic_unlock(dev, flags) \ raw_spin_unlock_irqrestore(&(dev)->mode_config.panic_lock, flags) +#else + +static inline bool drm_panic_trylock(struct drm_device *dev, unsigned long flags) +{ + return true; +} + +static inline void drm_panic_lock(struct drm_device *dev, unsigned long flags) {} +static inline void drm_panic_unlock(struct drm_device *dev, unsigned long flags) {} + +#endif + +#endif /* __DRM_PANIC_H__ */ diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h index d2676831d765..b3906dc04388 100644 --- a/include/drm/drm_print.h +++ b/include/drm/drm_print.h @@ -177,6 +177,10 @@ struct drm_printer { void *arg; const void *origin; const char *prefix; + struct { + unsigned int series; + unsigned int counter; + } line; enum drm_debug_category category; }; @@ -187,6 +191,7 @@ void __drm_puts_seq_file(struct drm_printer *p, const char *str); void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_dbg(struct drm_printer *p, struct va_format *vaf); void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf); +void __drm_printfn_line(struct drm_printer *p, struct va_format *vaf); __printf(2, 3) void drm_printf(struct drm_printer *p, const char *f, ...); @@ -411,6 +416,65 @@ static inline struct drm_printer drm_err_printer(struct drm_device *drm, return p; } +/** + * drm_line_printer - construct a &drm_printer that prefixes outputs with line numbers + * @p: the &struct drm_printer which actually generates the output + * @prefix: optional output prefix, or NULL for no prefix + * @series: optional unique series identifier, or 0 to omit identifier in the output + * + * This printer can be used to increase the robustness of the captured output + * to make sure we didn't lose any intermediate lines of the output. Helpful + * while capturing some crash data. 
+ * + * Example 1:: + * + * void crash_dump(struct drm_device *drm) + * { + * static unsigned int id; + * struct drm_printer p = drm_err_printer(drm, "crash"); + * struct drm_printer lp = drm_line_printer(&p, "dump", ++id); + * + * drm_printf(&lp, "foo"); + * drm_printf(&lp, "bar"); + * } + * + * Above code will print into the dmesg something like:: + * + * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.1: foo + * [ ] 0000:00:00.0: [drm] *ERROR* crash dump 1.2: bar + * + * Example 2:: + * + * void line_dump(struct device *dev) + * { + * struct drm_printer p = drm_info_printer(dev); + * struct drm_printer lp = drm_line_printer(&p, NULL, 0); + * + * drm_printf(&lp, "foo"); + * drm_printf(&lp, "bar"); + * } + * + * Above code will print:: + * + * [ ] 0000:00:00.0: [drm] 1: foo + * [ ] 0000:00:00.0: [drm] 2: bar + * + * RETURNS: + * The &drm_printer object + */ +static inline struct drm_printer drm_line_printer(struct drm_printer *p, + const char *prefix, + unsigned int series) +{ + struct drm_printer lp = { + .printfn = __drm_printfn_line, + .arg = p, + .prefix = prefix, + .line = { .series = series, }, + }; + return lp; +} + /* * struct device based logging * diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 9c437a057e5d..95e17504e46a 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -33,11 +33,11 @@ #define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000) /** - * DRM_SCHED_FENCE_DONT_PIPELINE - Prefent dependency pipelining + * DRM_SCHED_FENCE_DONT_PIPELINE - Prevent dependency pipelining * * Setting this flag on a scheduler fence prevents pipelining of jobs depending * on this fence. In other words we always insert a full CPU round trip before - * dependen jobs are pushed to the hw queue. + * dependent jobs are pushed to the hw queue. */ #define DRM_SCHED_FENCE_DONT_PIPELINE DMA_FENCE_FLAG_USER_BITS @@ -71,7 +71,7 @@ enum drm_sched_priority { DRM_SCHED_PRIORITY_COUNT }; -/* Used to chose between FIFO and RR jobs scheduling */ +/* Used to choose between FIFO and RR job-scheduling */ extern int drm_sched_policy; #define DRM_SCHED_POLICY_RR 0 @@ -97,13 +97,21 @@ struct drm_sched_entity { struct list_head list; /** + * @lock: + * + * Lock protecting the run-queue (@rq) to which this entity belongs, + * @priority and the list of schedulers (@sched_list, @num_sched_list). + */ + spinlock_t lock; + + /** * @rq: * * Runqueue on which this entity is currently scheduled. * * FIXME: Locking is very unclear for this. Writers are protected by - * @rq_lock, but readers are generally lockless and seem to just race - * with not even a READ_ONCE. + * @lock, but readers are generally lockless and seem to just race with + * not even a READ_ONCE. */ struct drm_sched_rq *rq; @@ -136,18 +144,11 @@ struct drm_sched_entity { * @priority: * * Priority of the entity. This can be modified by calling - * drm_sched_entity_set_priority(). Protected by &rq_lock. + * drm_sched_entity_set_priority(). Protected by @lock. */ enum drm_sched_priority priority; /** - * @rq_lock: - * - * Lock to modify the runqueue to which this entity belongs. - */ - spinlock_t rq_lock; - - /** * @job_queue: the list of jobs of this entity. */ struct spsc_queue job_queue; @@ -198,7 +199,7 @@ struct drm_sched_entity { * * Points to the finished fence of the last scheduled job. Only written * by the scheduler thread, can be accessed locklessly from - * drm_sched_job_arm() iff the queue is empty. + * drm_sched_job_arm() if the queue is empty. 
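
The lockless-read caveat above deserves a concrete shape: outside the scheduler thread, the only safe way to look at an RCU-managed fence pointer is through the dma_fence RCU helpers, which retry if the pointer changes underneath the reader. A minimal sketch under that assumption (peek_last_scheduled is illustrative, not part of the scheduler API):

	#include <linux/dma-fence.h>
	#include <linux/rcupdate.h>
	#include <drm/gpu_scheduler.h>

	/* Illustrative only: take a reference on the entity's last scheduled
	 * fence without holding the entity lock. */
	static struct dma_fence *peek_last_scheduled(struct drm_sched_entity *entity)
	{
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&entity->last_scheduled);
		rcu_read_unlock();

		return fence;	/* release with dma_fence_put() */
	}
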
*/ struct dma_fence __rcu *last_scheduled; @@ -243,21 +244,23 @@ struct drm_sched_entity { /** * struct drm_sched_rq - queue of entities to be scheduled. * - * @lock: to modify the entities list. * @sched: the scheduler to which this rq belongs to. - * @entities: list of the entities to be scheduled. + * @lock: protects @entities, @rb_tree_root and @current_entity. * @current_entity: the entity which is to be scheduled. - * @rb_tree_root: root of time based priory queue of entities for FIFO scheduling + * @entities: list of the entities to be scheduled. + * @rb_tree_root: root of time based priority queue of entities for FIFO scheduling * * Run queue is a set of entities scheduling command submissions for * one specific ring. It implements the scheduling policy that selects * the next entity to emit commands from. */ struct drm_sched_rq { - spinlock_t lock; struct drm_gpu_scheduler *sched; - struct list_head entities; + + spinlock_t lock; + /* Following members are protected by the @lock: */ struct drm_sched_entity *current_entity; + struct list_head entities; struct rb_root_cached rb_tree_root; }; @@ -321,7 +324,7 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); * @s_fence: contains the fences for the scheduling of job. * @finish_cb: the callback for the finished fence. * @credits: the number of credits this job contributes to the scheduler - * @work: Helper to reschdeule job kill to different context. + * @work: Helper to reschedule job kill to different context. * @id: a unique id assigned to each job scheduled on the scheduler. * @karma: increment on every hang caused by this job. If this exceeds the hang * limit of the scheduler then the job is marked guilty and will not @@ -337,6 +340,14 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f); struct drm_sched_job { struct spsc_node queue_node; struct list_head list; + + /** + * @sched: + * + * The scheduler this job is or will be scheduled on. Gets set by + * drm_sched_job_arm(). Valid until drm_sched_backend_ops.free_job() + * has finished. + */ struct drm_gpu_scheduler *sched; struct drm_sched_fence *s_fence; @@ -579,7 +590,7 @@ bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched); void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched); void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched); void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad); -void drm_sched_start(struct drm_gpu_scheduler *sched); +void drm_sched_start(struct drm_gpu_scheduler *sched, int errno); void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched); void drm_sched_increase_karma(struct drm_sched_job *bad); void drm_sched_reset_karma(struct drm_sched_job *bad); @@ -593,7 +604,8 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq, void drm_sched_rq_remove_entity(struct drm_sched_rq *rq, struct drm_sched_entity *entity); -void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts); +void drm_sched_rq_update_fifo_locked(struct drm_sched_entity *entity, + struct drm_sched_rq *rq, ktime_t ts); int drm_sched_entity_init(struct drm_sched_entity *entity, enum drm_sched_priority priority, diff --git a/include/drm/intel/i915_pciids.h b/include/drm/intel/pciids.h index f35534522d33..32480b5563db 100644 --- a/include/drm/intel/i915_pciids.h +++ b/include/drm/intel/pciids.h @@ -22,30 +22,23 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ -#ifndef _I915_PCIIDS_H -#define _I915_PCIIDS_H - -/* - * A pci_device_id struct { - * __u32 vendor, device; - * __u32 subvendor, subdevice; - * __u32 class, class_mask; - * kernel_ulong_t driver_data; - * }; - * Don't use C99 here because "class" is reserved and we want to - * give userspace flexibility. - */ -#define INTEL_VGA_DEVICE(id, info) { \ - 0x8086, id, \ - ~0, ~0, \ - 0x030000, 0xff0000, \ - (unsigned long) info } - -#define INTEL_QUANTA_VGA_DEVICE(info) { \ - 0x8086, 0x16a, \ - 0x152d, 0x8990, \ - 0x030000, 0xff0000, \ - (unsigned long) info } +#ifndef __PCIIDS_H__ +#define __PCIIDS_H__ + +#ifdef __KERNEL__ +#define INTEL_VGA_DEVICE(_id, _info) { \ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, (_id)), \ + .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \ + .driver_data = (kernel_ulong_t)(_info), \ +} + +#define INTEL_QUANTA_VGA_DEVICE(_info) { \ + .vendor = PCI_VENDOR_ID_INTEL, .device = 0x16a, \ + .subvendor = 0x152d, .subdevice = 0x8990, \ + .class = PCI_BASE_CLASS_DISPLAY << 16, .class_mask = 0xff << 16, \ + .driver_data = (kernel_ulong_t)(_info), \ +} +#endif #define INTEL_I810_IDS(MACRO__, ...) \ MACRO__(0x7121, ## __VA_ARGS__), /* I810 */ \ @@ -790,13 +783,28 @@ /* MTL */ #define INTEL_MTL_IDS(MACRO__, ...) \ - INTEL_ARL_IDS(MACRO__, ## __VA_ARGS__), \ MACRO__(0x7D40, ## __VA_ARGS__), \ MACRO__(0x7D45, ## __VA_ARGS__), \ MACRO__(0x7D55, ## __VA_ARGS__), \ MACRO__(0x7D60, ## __VA_ARGS__), \ MACRO__(0x7DD5, ## __VA_ARGS__) +/* PVC */ +#define INTEL_PVC_IDS(MACRO__, ...) \ + MACRO__(0x0B69, ## __VA_ARGS__), \ + MACRO__(0x0B6E, ## __VA_ARGS__), \ + MACRO__(0x0BD4, ## __VA_ARGS__), \ + MACRO__(0x0BD5, ## __VA_ARGS__), \ + MACRO__(0x0BD6, ## __VA_ARGS__), \ + MACRO__(0x0BD7, ## __VA_ARGS__), \ + MACRO__(0x0BD8, ## __VA_ARGS__), \ + MACRO__(0x0BD9, ## __VA_ARGS__), \ + MACRO__(0x0BDA, ## __VA_ARGS__), \ + MACRO__(0x0BDB, ## __VA_ARGS__), \ + MACRO__(0x0BE0, ## __VA_ARGS__), \ + MACRO__(0x0BE1, ## __VA_ARGS__), \ + MACRO__(0x0BE5, ## __VA_ARGS__) + /* LNL */ #define INTEL_LNL_IDS(MACRO__, ...) \ MACRO__(0x6420, ## __VA_ARGS__), \ @@ -811,4 +819,16 @@ MACRO__(0xE20D, ## __VA_ARGS__), \ MACRO__(0xE212, ## __VA_ARGS__) -#endif /* _I915_PCIIDS_H */ +/* PTL */ +#define INTEL_PTL_IDS(MACRO__, ...) \ + MACRO__(0xB080, ## __VA_ARGS__), \ + MACRO__(0xB081, ## __VA_ARGS__), \ + MACRO__(0xB082, ## __VA_ARGS__), \ + MACRO__(0xB090, ## __VA_ARGS__), \ + MACRO__(0xB091, ## __VA_ARGS__), \ + MACRO__(0xB092, ## __VA_ARGS__), \ + MACRO__(0xB0A0, ## __VA_ARGS__), \ + MACRO__(0xB0A1, ## __VA_ARGS__), \ + MACRO__(0xB0A2, ## __VA_ARGS__) + +#endif /* __PCIIDS_H__ */ diff --git a/include/drm/intel/xe_pciids.h b/include/drm/intel/xe_pciids.h deleted file mode 100644 index 644872a35c35..000000000000 --- a/include/drm/intel/xe_pciids.h +++ /dev/null @@ -1,202 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2022 Intel Corporation - */ - -#ifndef _XE_PCIIDS_H_ -#define _XE_PCIIDS_H_ - -/* - * Lists below can be turned into initializers for a struct pci_device_id - * by defining INTEL_VGA_DEVICE: - * - * #define INTEL_VGA_DEVICE(id, info) { \ - * 0x8086, id, \ - * ~0, ~0, \ - * 0x030000, 0xff0000, \ - * (unsigned long) info } - * - * And then calling like: - * - * XE_TGL_12_GT1_IDS(INTEL_VGA_DEVICE, ## __VA_ARGS__) - * - * To turn them into something else, just provide a different macro passed as - * first argument. - */ - -/* TGL */ -#define XE_TGL_GT1_IDS(MACRO__, ...) 
\ - MACRO__(0x9A60, ## __VA_ARGS__), \ - MACRO__(0x9A68, ## __VA_ARGS__), \ - MACRO__(0x9A70, ## __VA_ARGS__) - -#define XE_TGL_GT2_IDS(MACRO__, ...) \ - MACRO__(0x9A40, ## __VA_ARGS__), \ - MACRO__(0x9A49, ## __VA_ARGS__), \ - MACRO__(0x9A59, ## __VA_ARGS__), \ - MACRO__(0x9A78, ## __VA_ARGS__), \ - MACRO__(0x9AC0, ## __VA_ARGS__), \ - MACRO__(0x9AC9, ## __VA_ARGS__), \ - MACRO__(0x9AD9, ## __VA_ARGS__), \ - MACRO__(0x9AF8, ## __VA_ARGS__) - -#define XE_TGL_IDS(MACRO__, ...) \ - XE_TGL_GT1_IDS(MACRO__, ## __VA_ARGS__),\ - XE_TGL_GT2_IDS(MACRO__, ## __VA_ARGS__) - -/* RKL */ -#define XE_RKL_IDS(MACRO__, ...) \ - MACRO__(0x4C80, ## __VA_ARGS__), \ - MACRO__(0x4C8A, ## __VA_ARGS__), \ - MACRO__(0x4C8B, ## __VA_ARGS__), \ - MACRO__(0x4C8C, ## __VA_ARGS__), \ - MACRO__(0x4C90, ## __VA_ARGS__), \ - MACRO__(0x4C9A, ## __VA_ARGS__) - -/* DG1 */ -#define XE_DG1_IDS(MACRO__, ...) \ - MACRO__(0x4905, ## __VA_ARGS__), \ - MACRO__(0x4906, ## __VA_ARGS__), \ - MACRO__(0x4907, ## __VA_ARGS__), \ - MACRO__(0x4908, ## __VA_ARGS__), \ - MACRO__(0x4909, ## __VA_ARGS__) - -/* ADL-S */ -#define XE_ADLS_IDS(MACRO__, ...) \ - MACRO__(0x4680, ## __VA_ARGS__), \ - MACRO__(0x4682, ## __VA_ARGS__), \ - MACRO__(0x4688, ## __VA_ARGS__), \ - MACRO__(0x468A, ## __VA_ARGS__), \ - MACRO__(0x468B, ## __VA_ARGS__), \ - MACRO__(0x4690, ## __VA_ARGS__), \ - MACRO__(0x4692, ## __VA_ARGS__), \ - MACRO__(0x4693, ## __VA_ARGS__) - -/* ADL-P */ -#define XE_ADLP_IDS(MACRO__, ...) \ - MACRO__(0x46A0, ## __VA_ARGS__), \ - MACRO__(0x46A1, ## __VA_ARGS__), \ - MACRO__(0x46A2, ## __VA_ARGS__), \ - MACRO__(0x46A3, ## __VA_ARGS__), \ - MACRO__(0x46A6, ## __VA_ARGS__), \ - MACRO__(0x46A8, ## __VA_ARGS__), \ - MACRO__(0x46AA, ## __VA_ARGS__), \ - MACRO__(0x462A, ## __VA_ARGS__), \ - MACRO__(0x4626, ## __VA_ARGS__), \ - MACRO__(0x4628, ## __VA_ARGS__), \ - MACRO__(0x46B0, ## __VA_ARGS__), \ - MACRO__(0x46B1, ## __VA_ARGS__), \ - MACRO__(0x46B2, ## __VA_ARGS__), \ - MACRO__(0x46B3, ## __VA_ARGS__), \ - MACRO__(0x46C0, ## __VA_ARGS__), \ - MACRO__(0x46C1, ## __VA_ARGS__), \ - MACRO__(0x46C2, ## __VA_ARGS__), \ - MACRO__(0x46C3, ## __VA_ARGS__) - -/* ADL-N */ -#define XE_ADLN_IDS(MACRO__, ...) \ - MACRO__(0x46D0, ## __VA_ARGS__), \ - MACRO__(0x46D1, ## __VA_ARGS__), \ - MACRO__(0x46D2, ## __VA_ARGS__) - -/* RPL-S */ -#define XE_RPLS_IDS(MACRO__, ...) \ - MACRO__(0xA780, ## __VA_ARGS__), \ - MACRO__(0xA781, ## __VA_ARGS__), \ - MACRO__(0xA782, ## __VA_ARGS__), \ - MACRO__(0xA783, ## __VA_ARGS__), \ - MACRO__(0xA788, ## __VA_ARGS__), \ - MACRO__(0xA789, ## __VA_ARGS__), \ - MACRO__(0xA78A, ## __VA_ARGS__), \ - MACRO__(0xA78B, ## __VA_ARGS__) - -/* RPL-U */ -#define XE_RPLU_IDS(MACRO__, ...) \ - MACRO__(0xA721, ## __VA_ARGS__), \ - MACRO__(0xA7A1, ## __VA_ARGS__), \ - MACRO__(0xA7A9, ## __VA_ARGS__), \ - MACRO__(0xA7AC, ## __VA_ARGS__), \ - MACRO__(0xA7AD, ## __VA_ARGS__) - -/* RPL-P */ -#define XE_RPLP_IDS(MACRO__, ...) \ - XE_RPLU_IDS(MACRO__, ## __VA_ARGS__), \ - MACRO__(0xA720, ## __VA_ARGS__), \ - MACRO__(0xA7A0, ## __VA_ARGS__), \ - MACRO__(0xA7A8, ## __VA_ARGS__), \ - MACRO__(0xA7AA, ## __VA_ARGS__), \ - MACRO__(0xA7AB, ## __VA_ARGS__) - -/* DG2 */ -#define XE_DG2_G10_IDS(MACRO__, ...) \ - MACRO__(0x5690, ## __VA_ARGS__), \ - MACRO__(0x5691, ## __VA_ARGS__), \ - MACRO__(0x5692, ## __VA_ARGS__), \ - MACRO__(0x56A0, ## __VA_ARGS__), \ - MACRO__(0x56A1, ## __VA_ARGS__), \ - MACRO__(0x56A2, ## __VA_ARGS__), \ - MACRO__(0x56BE, ## __VA_ARGS__), \ - MACRO__(0x56BF, ## __VA_ARGS__) - -#define XE_DG2_G11_IDS(MACRO__, ...) 
\ - MACRO__(0x5693, ## __VA_ARGS__), \ - MACRO__(0x5694, ## __VA_ARGS__), \ - MACRO__(0x5695, ## __VA_ARGS__), \ - MACRO__(0x56A5, ## __VA_ARGS__), \ - MACRO__(0x56A6, ## __VA_ARGS__), \ - MACRO__(0x56B0, ## __VA_ARGS__), \ - MACRO__(0x56B1, ## __VA_ARGS__), \ - MACRO__(0x56BA, ## __VA_ARGS__), \ - MACRO__(0x56BB, ## __VA_ARGS__), \ - MACRO__(0x56BC, ## __VA_ARGS__), \ - MACRO__(0x56BD, ## __VA_ARGS__) - -#define XE_DG2_G12_IDS(MACRO__, ...) \ - MACRO__(0x5696, ## __VA_ARGS__), \ - MACRO__(0x5697, ## __VA_ARGS__), \ - MACRO__(0x56A3, ## __VA_ARGS__), \ - MACRO__(0x56A4, ## __VA_ARGS__), \ - MACRO__(0x56B2, ## __VA_ARGS__), \ - MACRO__(0x56B3, ## __VA_ARGS__) - -#define XE_DG2_IDS(MACRO__, ...) \ - XE_DG2_G10_IDS(MACRO__, ## __VA_ARGS__),\ - XE_DG2_G11_IDS(MACRO__, ## __VA_ARGS__),\ - XE_DG2_G12_IDS(MACRO__, ## __VA_ARGS__) - -#define XE_ATS_M150_IDS(MACRO__, ...) \ - MACRO__(0x56C0, ## __VA_ARGS__), \ - MACRO__(0x56C2, ## __VA_ARGS__) - -#define XE_ATS_M75_IDS(MACRO__, ...) \ - MACRO__(0x56C1, ## __VA_ARGS__) - -#define XE_ATS_M_IDS(MACRO__, ...) \ - XE_ATS_M150_IDS(MACRO__, ## __VA_ARGS__),\ - XE_ATS_M75_IDS(MACRO__, ## __VA_ARGS__) - -/* MTL / ARL */ -#define XE_MTL_IDS(MACRO__, ...) \ - MACRO__(0x7D40, ## __VA_ARGS__), \ - MACRO__(0x7D41, ## __VA_ARGS__), \ - MACRO__(0x7D45, ## __VA_ARGS__), \ - MACRO__(0x7D51, ## __VA_ARGS__), \ - MACRO__(0x7D55, ## __VA_ARGS__), \ - MACRO__(0x7D60, ## __VA_ARGS__), \ - MACRO__(0x7D67, ## __VA_ARGS__), \ - MACRO__(0x7DD1, ## __VA_ARGS__), \ - MACRO__(0x7DD5, ## __VA_ARGS__) - -#define XE_LNL_IDS(MACRO__, ...) \ - MACRO__(0x6420, ## __VA_ARGS__), \ - MACRO__(0x64A0, ## __VA_ARGS__), \ - MACRO__(0x64B0, ## __VA_ARGS__) - -#define XE_BMG_IDS(MACRO__, ...) \ - MACRO__(0xE202, ## __VA_ARGS__), \ - MACRO__(0xE20B, ## __VA_ARGS__), \ - MACRO__(0xE20C, ## __VA_ARGS__), \ - MACRO__(0xE20D, ## __VA_ARGS__), \ - MACRO__(0xE212, ## __VA_ARGS__) - -#endif diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h index 7b56d1ca36d7..5804408815be 100644 --- a/include/drm/ttm/ttm_bo.h +++ b/include/drm/ttm/ttm_bo.h @@ -462,5 +462,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo); pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, pgprot_t tmp); void ttm_bo_tt_destroy(struct ttm_buffer_object *bo); +int ttm_bo_populate(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx); #endif diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h index c22f30535c84..39b8636b1845 100644 --- a/include/drm/ttm/ttm_device.h +++ b/include/drm/ttm/ttm_device.h @@ -252,9 +252,10 @@ struct ttm_device { spinlock_t lru_lock; /** - * @pinned: Buffer objects which are pinned and so not on any LRU list. + * @unevictable: Buffer objects which are pinned or swapped and as such + * not on an LRU list. 
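
The ttm_bo_populate() declaration added to ttm_bo.h above takes the usual struct ttm_operation_ctx, so the caller chooses the waiting semantics. A minimal sketch of a caller, assuming a driver that wants the backing store present before CPU access (prepare_for_cpu_access is illustrative, not part of the patch):

	#include <drm/ttm/ttm_bo.h>

	/* Illustrative only: populate the BO's backing store, letting the
	 * allocation be interrupted by a signal. */
	static int prepare_for_cpu_access(struct ttm_buffer_object *bo)
	{
		struct ttm_operation_ctx ctx = {
			.interruptible = true,
			.no_wait_gpu = false,
		};

		return ttm_bo_populate(bo, &ctx);
	}
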
*/ - struct list_head pinned; + struct list_head unevictable; /** * @dev_mapping: A pointer to the struct address_space for invalidating diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 2b9d856ff388..991edafdb2dd 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -129,6 +129,11 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt) return tt->page_flags & TTM_TT_FLAG_PRIV_POPULATED; } +static inline bool ttm_tt_is_swapped(const struct ttm_tt *tt) +{ + return tt->page_flags & TTM_TT_FLAG_SWAPPED; +} + /** * ttm_tt_create * diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h index 8332f8d82f96..e850dc3a1ad3 100644 --- a/include/dt-bindings/arm/qcom,ids.h +++ b/include/dt-bindings/arm/qcom,ids.h @@ -255,8 +255,10 @@ #define QCOM_ID_IPQ9510 521 #define QCOM_ID_QRB4210 523 #define QCOM_ID_QRB2210 524 +#define QCOM_ID_SAR2130P 525 #define QCOM_ID_SM8475 530 #define QCOM_ID_SM8475P 531 +#define QCOM_ID_SA8255P 532 #define QCOM_ID_SA8775P 534 #define QCOM_ID_QRU1000 539 #define QCOM_ID_SM8475_2 540 @@ -264,6 +266,7 @@ #define QCOM_ID_X1E80100 555 #define QCOM_ID_SM8650 557 #define QCOM_ID_SM4450 568 +#define QCOM_ID_SAR1130P 579 #define QCOM_ID_QDU1010 587 #define QCOM_ID_QRU1032 588 #define QCOM_ID_QRU1052 589 @@ -276,8 +279,12 @@ #define QCOM_ID_QCM8550 604 #define QCOM_ID_IPQ5300 624 #define QCOM_ID_IPQ5321 650 +#define QCOM_ID_IPQ5424 651 +#define QCOM_ID_IPQ5404 671 +#define QCOM_ID_QCS9100 667 #define QCOM_ID_QCS8300 674 #define QCOM_ID_QCS8275 675 +#define QCOM_ID_QCS615 680 /* * The board type and revision information, used by Qualcomm bootloaders and diff --git a/include/dt-bindings/clock/aspeed,ast2700-scu.h b/include/dt-bindings/clock/aspeed,ast2700-scu.h new file mode 100644 index 000000000000..63021af3caf5 --- /dev/null +++ b/include/dt-bindings/clock/aspeed,ast2700-scu.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Device Tree binding constants for AST2700 clock controller. + * + * Copyright (c) 2024 Aspeed Technology Inc. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_AST2700_H +#define __DT_BINDINGS_CLOCK_AST2700_H + +/* SOC0 clk */ +#define SCU0_CLKIN 0 +#define SCU0_CLK_24M 1 +#define SCU0_CLK_192M 2 +#define SCU0_CLK_UART 3 +#define SCU0_CLK_UART_DIV13 3 +#define SCU0_CLK_PSP 4 +#define SCU0_CLK_HPLL 5 +#define SCU0_CLK_HPLL_DIV2 6 +#define SCU0_CLK_HPLL_DIV4 7 +#define SCU0_CLK_HPLL_DIV_AHB 8 +#define SCU0_CLK_DPLL 9 +#define SCU0_CLK_MPLL 10 +#define SCU0_CLK_MPLL_DIV2 11 +#define SCU0_CLK_MPLL_DIV4 12 +#define SCU0_CLK_MPLL_DIV8 13 +#define SCU0_CLK_MPLL_DIV_AHB 14 +#define SCU0_CLK_D0 15 +#define SCU0_CLK_D1 16 +#define SCU0_CLK_CRT0 17 +#define SCU0_CLK_CRT1 18 +#define SCU0_CLK_MPHY 19 +#define SCU0_CLK_AXI0 20 +#define SCU0_CLK_AXI1 21 +#define SCU0_CLK_AHB 22 +#define SCU0_CLK_APB 23 +#define SCU0_CLK_UART4 24 +#define SCU0_CLK_EMMCMUX 25 +#define SCU0_CLK_EMMC 26 +#define SCU0_CLK_U2PHY_CLK12M 27 +#define SCU0_CLK_U2PHY_REFCLK 28 + +/* SOC0 clk-gate */ +#define SCU0_CLK_GATE_MCLK 29 +#define SCU0_CLK_GATE_ECLK 30 +#define SCU0_CLK_GATE_2DCLK 31 +#define SCU0_CLK_GATE_VCLK 32 +#define SCU0_CLK_GATE_BCLK 33 +#define SCU0_CLK_GATE_VGA0CLK 34 +#define SCU0_CLK_GATE_REFCLK 35 +#define SCU0_CLK_GATE_PORTBUSB2CLK 36 +#define SCU0_CLK_GATE_UHCICLK 37 +#define SCU0_CLK_GATE_VGA1CLK 38 +#define SCU0_CLK_GATE_DDRPHYCLK 39 +#define SCU0_CLK_GATE_E2M0CLK 40 +#define SCU0_CLK_GATE_HACCLK 41 +#define SCU0_CLK_GATE_PORTAUSB2CLK 42 +#define SCU0_CLK_GATE_UART4CLK 43 +#define SCU0_CLK_GATE_SLICLK 44 +#define SCU0_CLK_GATE_DACCLK 45 +#define SCU0_CLK_GATE_DP 46 +#define SCU0_CLK_GATE_E2M1CLK 47 +#define SCU0_CLK_GATE_CRT0CLK 48 +#define SCU0_CLK_GATE_CRT1CLK 49 +#define SCU0_CLK_GATE_ECDSACLK 50 +#define SCU0_CLK_GATE_RSACLK 51 +#define SCU0_CLK_GATE_RVAS0CLK 52 +#define SCU0_CLK_GATE_UFSCLK 53 +#define SCU0_CLK_GATE_EMMCCLK 54 +#define SCU0_CLK_GATE_RVAS1CLK 55 + +/* SOC1 clk */ +#define SCU1_CLKIN 0 +#define SCU1_CLK_HPLL 1 +#define SCU1_CLK_APLL 2 +#define SCU1_CLK_APLL_DIV2 3 +#define SCU1_CLK_APLL_DIV4 4 +#define SCU1_CLK_DPLL 5 +#define SCU1_CLK_UXCLK 6 +#define SCU1_CLK_HUXCLK 7 +#define SCU1_CLK_UARTX 8 +#define SCU1_CLK_HUARTX 9 +#define SCU1_CLK_AHB 10 +#define SCU1_CLK_APB 11 +#define SCU1_CLK_UART0 12 +#define SCU1_CLK_UART1 13 +#define SCU1_CLK_UART2 14 +#define SCU1_CLK_UART3 15 +#define SCU1_CLK_UART5 16 +#define SCU1_CLK_UART6 17 +#define SCU1_CLK_UART7 18 +#define SCU1_CLK_UART8 19 +#define SCU1_CLK_UART9 20 +#define SCU1_CLK_UART10 21 +#define SCU1_CLK_UART11 22 +#define SCU1_CLK_UART12 23 +#define SCU1_CLK_UART13 24 +#define SCU1_CLK_UART14 25 +#define SCU1_CLK_APLL_DIVN 26 +#define SCU1_CLK_SDMUX 27 +#define SCU1_CLK_SDCLK 28 +#define SCU1_CLK_RMII 29 +#define SCU1_CLK_RGMII 30 +#define SCU1_CLK_MACHCLK 31 +#define SCU1_CLK_MAC0RCLK 32 +#define SCU1_CLK_MAC1RCLK 33 +#define SCU1_CLK_CAN 34 + +/* SOC1 clk gate */ +#define SCU1_CLK_GATE_LCLK0 35 +#define SCU1_CLK_GATE_LCLK1 36 +#define SCU1_CLK_GATE_ESPI0CLK 37 +#define SCU1_CLK_GATE_ESPI1CLK 38 +#define SCU1_CLK_GATE_SDCLK 39 +#define SCU1_CLK_GATE_IPEREFCLK 40 +#define SCU1_CLK_GATE_REFCLK 41 +#define SCU1_CLK_GATE_LPCHCLK 42 +#define SCU1_CLK_GATE_MAC0CLK 43 +#define SCU1_CLK_GATE_MAC1CLK 44 +#define SCU1_CLK_GATE_MAC2CLK 45 +#define SCU1_CLK_GATE_UART0CLK 46 +#define SCU1_CLK_GATE_UART1CLK 47 +#define SCU1_CLK_GATE_UART2CLK 48 +#define SCU1_CLK_GATE_UART3CLK 49 +#define SCU1_CLK_GATE_I2CCLK 50 +#define SCU1_CLK_GATE_I3C0CLK 51 +#define SCU1_CLK_GATE_I3C1CLK 52 +#define SCU1_CLK_GATE_I3C2CLK 53 +#define SCU1_CLK_GATE_I3C3CLK 54 +#define 
SCU1_CLK_GATE_I3C4CLK 55 +#define SCU1_CLK_GATE_I3C5CLK 56 +#define SCU1_CLK_GATE_I3C6CLK 57 +#define SCU1_CLK_GATE_I3C7CLK 58 +#define SCU1_CLK_GATE_I3C8CLK 59 +#define SCU1_CLK_GATE_I3C9CLK 60 +#define SCU1_CLK_GATE_I3C10CLK 61 +#define SCU1_CLK_GATE_I3C11CLK 62 +#define SCU1_CLK_GATE_I3C12CLK 63 +#define SCU1_CLK_GATE_I3C13CLK 64 +#define SCU1_CLK_GATE_I3C14CLK 65 +#define SCU1_CLK_GATE_I3C15CLK 66 +#define SCU1_CLK_GATE_UART5CLK 67 +#define SCU1_CLK_GATE_UART6CLK 68 +#define SCU1_CLK_GATE_UART7CLK 69 +#define SCU1_CLK_GATE_UART8CLK 70 +#define SCU1_CLK_GATE_UART9CLK 71 +#define SCU1_CLK_GATE_UART10CLK 72 +#define SCU1_CLK_GATE_UART11CLK 73 +#define SCU1_CLK_GATE_UART12CLK 74 +#define SCU1_CLK_GATE_FSICLK 75 +#define SCU1_CLK_GATE_LTPIPHYCLK 76 +#define SCU1_CLK_GATE_LTPICLK 77 +#define SCU1_CLK_GATE_VGALCLK 78 +#define SCU1_CLK_GATE_UHCICLK 79 +#define SCU1_CLK_GATE_CANCLK 80 +#define SCU1_CLK_GATE_PCICLK 81 +#define SCU1_CLK_GATE_SLICLK 82 +#define SCU1_CLK_GATE_E2MCLK 83 +#define SCU1_CLK_GATE_PORTCUSB2CLK 84 +#define SCU1_CLK_GATE_PORTDUSB2CLK 85 +#define SCU1_CLK_GATE_LTPI1TXCLK 86 + +#endif diff --git a/include/dt-bindings/clock/fsd-clk.h b/include/dt-bindings/clock/fsd-clk.h index c8a2af1dd1ad..3f7b64d93558 100644 --- a/include/dt-bindings/clock/fsd-clk.h +++ b/include/dt-bindings/clock/fsd-clk.h @@ -28,7 +28,6 @@ #define DOUT_CMU_IMEM_ACLK 13 #define DOUT_CMU_IMEM_DMACLK 14 #define GAT_CMU_FSYS0_SHARED0DIV4 15 -#define CMU_NR_CLK 16 /* PERIC */ #define PERIC_SCLK_UART0 1 @@ -76,7 +75,6 @@ #define PERIC_EQOS_PHYRXCLK_MUX 43 #define PERIC_EQOS_PHYRXCLK 44 #define PERIC_DOUT_RGMII_CLK 45 -#define PERIC_NR_CLK 46 /* FSYS0 */ #define UFS0_MPHY_REFCLK_IXTAL24 1 @@ -101,7 +99,6 @@ #define FSYS0_EQOS_TOP0_IPCLKPORT_RGMII_CLK_I 20 #define FSYS0_EQOS_TOP0_IPCLKPORT_CLK_RX_I 21 #define FSYS0_DOUT_FSYS0_PERIBUS_GRP 22 -#define FSYS0_NR_CLK 23 /* FSYS1 */ #define PCIE_LINK0_IPCLKPORT_DBI_ACLK 1 @@ -112,7 +109,6 @@ #define PCIE_LINK1_IPCLKPORT_AUX_ACLK 6 #define PCIE_LINK1_IPCLKPORT_MSTR_ACLK 7 #define PCIE_LINK1_IPCLKPORT_SLV_ACLK 8 -#define FSYS1_NR_CLK 9 /* IMEM */ #define IMEM_DMA0_IPCLKPORT_ACLK 1 @@ -126,11 +122,9 @@ #define IMEM_TMU_TOP_IPCLKPORT_I_CLK_TS 9 #define IMEM_TMU_GPU_IPCLKPORT_I_CLK_TS 10 #define IMEM_TMU_GT_IPCLKPORT_I_CLK_TS 11 -#define IMEM_NR_CLK 12 /* MFC */ #define MFC_MFC_IPCLKPORT_ACLK 1 -#define MFC_NR_CLK 2 /* CAM_CSI */ #define CAM_CSI0_0_IPCLKPORT_I_ACLK 1 @@ -145,6 +139,5 @@ #define CAM_CSI2_1_IPCLKPORT_I_ACLK 10 #define CAM_CSI2_2_IPCLKPORT_I_ACLK 11 #define CAM_CSI2_3_IPCLKPORT_I_ACLK 12 -#define CAM_CSI_NR_CLK 13 #endif /*_DT_BINDINGS_CLOCK_FSD_H */ diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h index 787c9e74dc96..6c685067288b 100644 --- a/include/dt-bindings/clock/imx93-clock.h +++ b/include/dt-bindings/clock/imx93-clock.h @@ -204,6 +204,10 @@ #define IMX93_CLK_A55_SEL 199 #define IMX93_CLK_A55_CORE 200 #define IMX93_CLK_PDM_IPG 201 -#define IMX93_CLK_END 202 +#define IMX91_CLK_ENET1_QOS_TSN 202 +#define IMX91_CLK_ENET_TIMER 203 +#define IMX91_CLK_ENET2_REGULAR 204 +#define IMX91_CLK_ENET2_REGULAR_GATE 205 +#define IMX91_CLK_ENET1_QOS_TSN_GATE 206 #endif diff --git a/include/dt-bindings/clock/marvell,pxa1908.h b/include/dt-bindings/clock/marvell,pxa1908.h new file mode 100644 index 000000000000..fb15b0d0cd4c --- /dev/null +++ b/include/dt-bindings/clock/marvell,pxa1908.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +#ifndef __DTS_MARVELL_PXA1908_CLOCK_H +#define 
__DTS_MARVELL_PXA1908_CLOCK_H + +/* plls */ +#define PXA1908_CLK_CLK32 1 +#define PXA1908_CLK_VCTCXO 2 +#define PXA1908_CLK_PLL1_624 3 +#define PXA1908_CLK_PLL1_416 4 +#define PXA1908_CLK_PLL1_499 5 +#define PXA1908_CLK_PLL1_832 6 +#define PXA1908_CLK_PLL1_1248 7 +#define PXA1908_CLK_PLL1_D2 8 +#define PXA1908_CLK_PLL1_D4 9 +#define PXA1908_CLK_PLL1_D8 10 +#define PXA1908_CLK_PLL1_D16 11 +#define PXA1908_CLK_PLL1_D6 12 +#define PXA1908_CLK_PLL1_D12 13 +#define PXA1908_CLK_PLL1_D24 14 +#define PXA1908_CLK_PLL1_D48 15 +#define PXA1908_CLK_PLL1_D96 16 +#define PXA1908_CLK_PLL1_D13 17 +#define PXA1908_CLK_PLL1_32 18 +#define PXA1908_CLK_PLL1_208 19 +#define PXA1908_CLK_PLL1_117 20 +#define PXA1908_CLK_PLL1_416_GATE 21 +#define PXA1908_CLK_PLL1_624_GATE 22 +#define PXA1908_CLK_PLL1_832_GATE 23 +#define PXA1908_CLK_PLL1_1248_GATE 24 +#define PXA1908_CLK_PLL1_D2_GATE 25 +#define PXA1908_CLK_PLL1_499_EN 26 +#define PXA1908_CLK_PLL2VCO 27 +#define PXA1908_CLK_PLL2 28 +#define PXA1908_CLK_PLL2P 29 +#define PXA1908_CLK_PLL2VCODIV3 30 +#define PXA1908_CLK_PLL3VCO 31 +#define PXA1908_CLK_PLL3 32 +#define PXA1908_CLK_PLL3P 33 +#define PXA1908_CLK_PLL3VCODIV3 34 +#define PXA1908_CLK_PLL4VCO 35 +#define PXA1908_CLK_PLL4 36 +#define PXA1908_CLK_PLL4P 37 +#define PXA1908_CLK_PLL4VCODIV3 38 + +/* apb (apbc) peripherals */ +#define PXA1908_CLK_UART0 1 +#define PXA1908_CLK_UART1 2 +#define PXA1908_CLK_GPIO 3 +#define PXA1908_CLK_PWM0 4 +#define PXA1908_CLK_PWM1 5 +#define PXA1908_CLK_PWM2 6 +#define PXA1908_CLK_PWM3 7 +#define PXA1908_CLK_SSP0 8 +#define PXA1908_CLK_SSP1 9 +#define PXA1908_CLK_IPC_RST 10 +#define PXA1908_CLK_RTC 11 +#define PXA1908_CLK_TWSI0 12 +#define PXA1908_CLK_KPC 13 +#define PXA1908_CLK_SWJTAG 14 +#define PXA1908_CLK_SSP2 15 +#define PXA1908_CLK_TWSI1 16 +#define PXA1908_CLK_THERMAL 17 +#define PXA1908_CLK_TWSI3 18 + +/* apb (apbcp) peripherals */ +#define PXA1908_CLK_UART2 1 +#define PXA1908_CLK_TWSI2 2 +#define PXA1908_CLK_AICER 3 + +/* axi (apmu) peripherals */ +#define PXA1908_CLK_CCIC1 1 +#define PXA1908_CLK_ISP 2 +#define PXA1908_CLK_DSI1 3 +#define PXA1908_CLK_DISP1 4 +#define PXA1908_CLK_CCIC0 5 +#define PXA1908_CLK_SDH0 6 +#define PXA1908_CLK_SDH1 7 +#define PXA1908_CLK_USB 8 +#define PXA1908_CLK_NF 9 +#define PXA1908_CLK_CORE_DEBUG 10 +#define PXA1908_CLK_VPU 11 +#define PXA1908_CLK_GC 12 +#define PXA1908_CLK_SDH2 13 +#define PXA1908_CLK_GC2D 14 +#define PXA1908_CLK_TRACE 15 +#define PXA1908_CLK_DVC_DFC_DEBUG 16 + +#endif diff --git a/include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h b/include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h new file mode 100644 index 000000000000..b4705204409c --- /dev/null +++ b/include/dt-bindings/clock/mediatek,mt6735-apmixedsys.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_CLK_MT6735_APMIXEDSYS_H +#define _DT_BINDINGS_CLK_MT6735_APMIXEDSYS_H + +#define CLK_APMIXED_ARMPLL 0 +#define CLK_APMIXED_MAINPLL 1 +#define CLK_APMIXED_UNIVPLL 2 +#define CLK_APMIXED_MMPLL 3 +#define CLK_APMIXED_MSDCPLL 4 +#define CLK_APMIXED_VENCPLL 5 +#define CLK_APMIXED_TVDPLL 6 +#define CLK_APMIXED_APLL1 7 +#define CLK_APMIXED_APLL2 8 + +#endif diff --git a/include/dt-bindings/clock/mediatek,mt6735-imgsys.h b/include/dt-bindings/clock/mediatek,mt6735-imgsys.h new file mode 100644 index 000000000000..f250c26c5eb4 --- /dev/null +++ b/include/dt-bindings/clock/mediatek,mt6735-imgsys.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_CLK_MT6735_IMGSYS_H 
+#define _DT_BINDINGS_CLK_MT6735_IMGSYS_H + +#define CLK_IMG_SMI_LARB2 0 +#define CLK_IMG_CAM_SMI 1 +#define CLK_IMG_CAM_CAM 2 +#define CLK_IMG_SEN_TG 3 +#define CLK_IMG_SEN_CAM 4 +#define CLK_IMG_CAM_SV 5 +#define CLK_IMG_SUFOD 6 +#define CLK_IMG_FD 7 + +#endif /* _DT_BINDINGS_CLK_MT6735_IMGSYS_H */ diff --git a/include/dt-bindings/clock/mediatek,mt6735-infracfg.h b/include/dt-bindings/clock/mediatek,mt6735-infracfg.h new file mode 100644 index 000000000000..d8dd51e15637 --- /dev/null +++ b/include/dt-bindings/clock/mediatek,mt6735-infracfg.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_CLK_MT6735_INFRACFG_H +#define _DT_BINDINGS_CLK_MT6735_INFRACFG_H + +#define CLK_INFRA_DBG 0 +#define CLK_INFRA_GCE 1 +#define CLK_INFRA_TRBG 2 +#define CLK_INFRA_CPUM 3 +#define CLK_INFRA_DEVAPC 4 +#define CLK_INFRA_AUDIO 5 +#define CLK_INFRA_GCPU 6 +#define CLK_INFRA_L2C_SRAM 7 +#define CLK_INFRA_M4U 8 +#define CLK_INFRA_CLDMA 9 +#define CLK_INFRA_CONNMCU_BUS 10 +#define CLK_INFRA_KP 11 +#define CLK_INFRA_APXGPT 12 +#define CLK_INFRA_SEJ 13 +#define CLK_INFRA_CCIF0_AP 14 +#define CLK_INFRA_CCIF1_AP 15 +#define CLK_INFRA_PMIC_SPI 16 +#define CLK_INFRA_PMIC_WRAP 17 + +#endif diff --git a/include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h b/include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h new file mode 100644 index 000000000000..d2d99a48348a --- /dev/null +++ b/include/dt-bindings/clock/mediatek,mt6735-mfgcfg.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_CLK_MT6735_MFGCFG_H +#define _DT_BINDINGS_CLK_MT6735_MFGCFG_H + +#define CLK_MFG_BG3D 0 + +#endif /* _DT_BINDINGS_CLK_MT6735_MFGCFG_H */ diff --git a/include/dt-bindings/clock/mediatek,mt6735-pericfg.h b/include/dt-bindings/clock/mediatek,mt6735-pericfg.h new file mode 100644 index 000000000000..16bc21bbd95b --- /dev/null +++ b/include/dt-bindings/clock/mediatek,mt6735-pericfg.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_CLK_MT6735_PERICFG_H +#define _DT_BINDINGS_CLK_MT6735_PERICFG_H + +#define CLK_PERI_DISP_PWM 0 +#define CLK_PERI_THERM 1 +#define CLK_PERI_PWM1 2 +#define CLK_PERI_PWM2 3 +#define CLK_PERI_PWM3 4 +#define CLK_PERI_PWM4 5 +#define CLK_PERI_PWM5 6 +#define CLK_PERI_PWM6 7 +#define CLK_PERI_PWM7 8 +#define CLK_PERI_PWM 9 +#define CLK_PERI_USB0 10 +#define CLK_PERI_IRDA 11 +#define CLK_PERI_APDMA 12 +#define CLK_PERI_MSDC30_0 13 +#define CLK_PERI_MSDC30_1 14 +#define CLK_PERI_MSDC30_2 15 +#define CLK_PERI_MSDC30_3 16 +#define CLK_PERI_UART0 17 +#define CLK_PERI_UART1 18 +#define CLK_PERI_UART2 19 +#define CLK_PERI_UART3 20 +#define CLK_PERI_UART4 21 +#define CLK_PERI_BTIF 22 +#define CLK_PERI_I2C0 23 +#define CLK_PERI_I2C1 24 +#define CLK_PERI_I2C2 25 +#define CLK_PERI_I2C3 26 +#define CLK_PERI_AUXADC 27 +#define CLK_PERI_SPI0 28 +#define CLK_PERI_IRTX 29 + +#endif diff --git a/include/dt-bindings/clock/mediatek,mt6735-topckgen.h b/include/dt-bindings/clock/mediatek,mt6735-topckgen.h new file mode 100644 index 000000000000..d4b1e113cc0a --- /dev/null +++ b/include/dt-bindings/clock/mediatek,mt6735-topckgen.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_CLK_MT6735_TOPCKGEN_H +#define _DT_BINDINGS_CLK_MT6735_TOPCKGEN_H + +#define CLK_TOP_AD_SYS_26M_CK 0 +#define CLK_TOP_CLKPH_MCK_O 1 +#define CLK_TOP_DMPLL 2 +#define CLK_TOP_DPI_CK 3 +#define CLK_TOP_WHPLL_AUDIO_CK 4 + +#define CLK_TOP_SYSPLL_D2 5 +#define 
CLK_TOP_SYSPLL_D3 6 +#define CLK_TOP_SYSPLL_D5 7 +#define CLK_TOP_SYSPLL1_D2 8 +#define CLK_TOP_SYSPLL1_D4 9 +#define CLK_TOP_SYSPLL1_D8 10 +#define CLK_TOP_SYSPLL1_D16 11 +#define CLK_TOP_SYSPLL2_D2 12 +#define CLK_TOP_SYSPLL2_D4 13 +#define CLK_TOP_SYSPLL3_D2 14 +#define CLK_TOP_SYSPLL3_D4 15 +#define CLK_TOP_SYSPLL4_D2 16 +#define CLK_TOP_SYSPLL4_D4 17 +#define CLK_TOP_UNIVPLL_D2 18 +#define CLK_TOP_UNIVPLL_D3 19 +#define CLK_TOP_UNIVPLL_D5 20 +#define CLK_TOP_UNIVPLL_D26 21 +#define CLK_TOP_UNIVPLL1_D2 22 +#define CLK_TOP_UNIVPLL1_D4 23 +#define CLK_TOP_UNIVPLL1_D8 24 +#define CLK_TOP_UNIVPLL2_D2 25 +#define CLK_TOP_UNIVPLL2_D4 26 +#define CLK_TOP_UNIVPLL2_D8 27 +#define CLK_TOP_UNIVPLL3_D2 28 +#define CLK_TOP_UNIVPLL3_D4 29 +#define CLK_TOP_MSDCPLL_D2 30 +#define CLK_TOP_MSDCPLL_D4 31 +#define CLK_TOP_MSDCPLL_D8 32 +#define CLK_TOP_MSDCPLL_D16 33 +#define CLK_TOP_VENCPLL_D3 34 +#define CLK_TOP_TVDPLL_D2 35 +#define CLK_TOP_TVDPLL_D4 36 +#define CLK_TOP_DMPLL_D2 37 +#define CLK_TOP_DMPLL_D4 38 +#define CLK_TOP_DMPLL_D8 39 +#define CLK_TOP_AD_SYS_26M_D2 40 + +#define CLK_TOP_AXI_SEL 41 +#define CLK_TOP_MEM_SEL 42 +#define CLK_TOP_DDRPHY_SEL 43 +#define CLK_TOP_MM_SEL 44 +#define CLK_TOP_PWM_SEL 45 +#define CLK_TOP_VDEC_SEL 46 +#define CLK_TOP_MFG_SEL 47 +#define CLK_TOP_CAMTG_SEL 48 +#define CLK_TOP_UART_SEL 49 +#define CLK_TOP_SPI_SEL 50 +#define CLK_TOP_USB20_SEL 51 +#define CLK_TOP_MSDC50_0_SEL 52 +#define CLK_TOP_MSDC30_0_SEL 53 +#define CLK_TOP_MSDC30_1_SEL 54 +#define CLK_TOP_MSDC30_2_SEL 55 +#define CLK_TOP_MSDC30_3_SEL 56 +#define CLK_TOP_AUDIO_SEL 57 +#define CLK_TOP_AUDINTBUS_SEL 58 +#define CLK_TOP_PMICSPI_SEL 59 +#define CLK_TOP_SCP_SEL 60 +#define CLK_TOP_ATB_SEL 61 +#define CLK_TOP_DPI0_SEL 62 +#define CLK_TOP_SCAM_SEL 63 +#define CLK_TOP_MFG13M_SEL 64 +#define CLK_TOP_AUD1_SEL 65 +#define CLK_TOP_AUD2_SEL 66 +#define CLK_TOP_IRDA_SEL 67 +#define CLK_TOP_IRTX_SEL 68 +#define CLK_TOP_DISPPWM_SEL 69 + +#endif diff --git a/include/dt-bindings/clock/mediatek,mt6735-vdecsys.h b/include/dt-bindings/clock/mediatek,mt6735-vdecsys.h new file mode 100644 index 000000000000..f94cec10c89f --- /dev/null +++ b/include/dt-bindings/clock/mediatek,mt6735-vdecsys.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_CLK_MT6735_VDECSYS_H +#define _DT_BINDINGS_CLK_MT6735_VDECSYS_H + +#define CLK_VDEC_VDEC 0 +#define CLK_VDEC_SMI_LARB1 1 + +#endif /* _DT_BINDINGS_CLK_MT6735_VDECSYS_H */ diff --git a/include/dt-bindings/clock/mediatek,mt6735-vencsys.h b/include/dt-bindings/clock/mediatek,mt6735-vencsys.h new file mode 100644 index 000000000000..e5a9cb4f269f --- /dev/null +++ b/include/dt-bindings/clock/mediatek,mt6735-vencsys.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_CLK_MT6735_VENCSYS_H +#define _DT_BINDINGS_CLK_MT6735_VENCSYS_H + +#define CLK_VENC_SMI_LARB3 0 +#define CLK_VENC_VENC 1 +#define CLK_VENC_JPGENC 2 +#define CLK_VENC_JPGDEC 3 + +#endif /* _DT_BINDINGS_CLK_MT6735_VENCSYS_H */ diff --git a/include/dt-bindings/clock/mobileye,eyeq5-clk.h b/include/dt-bindings/clock/mobileye,eyeq5-clk.h index 26d8930335e4..f353c2988035 100644 --- a/include/dt-bindings/clock/mobileye,eyeq5-clk.h +++ b/include/dt-bindings/clock/mobileye,eyeq5-clk.h @@ -6,17 +6,60 @@ #ifndef _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ5_CLK_H #define _DT_BINDINGS_CLOCK_MOBILEYE_EYEQ5_CLK_H -#define EQ5C_PLL_CPU 0 -#define EQ5C_PLL_VMP 1 -#define EQ5C_PLL_PMA 2 -#define EQ5C_PLL_VDI 3 -#define EQ5C_PLL_DDR0 4 -#define 
EQ5C_PLL_PCI 5 -#define EQ5C_PLL_PER 6 -#define EQ5C_PLL_PMAC 7 -#define EQ5C_PLL_MPC 8 -#define EQ5C_PLL_DDR1 9 - -#define EQ5C_DIV_OSPI 10 +#define EQ5C_PLL_CPU 0 +#define EQ5C_PLL_VMP 1 +#define EQ5C_PLL_PMA 2 +#define EQ5C_PLL_VDI 3 +#define EQ5C_PLL_DDR0 4 +#define EQ5C_PLL_PCI 5 +#define EQ5C_PLL_PER 6 +#define EQ5C_PLL_PMAC 7 +#define EQ5C_PLL_MPC 8 +#define EQ5C_PLL_DDR1 9 + +#define EQ5C_DIV_OSPI 10 + +/* EQ5C_PLL_CPU children */ +#define EQ5C_CPU_CORE0 11 +#define EQ5C_CPU_CORE1 12 +#define EQ5C_CPU_CORE2 13 +#define EQ5C_CPU_CORE3 14 + +/* EQ5C_PLL_PER children */ +#define EQ5C_PER_OCC 15 +#define EQ5C_PER_UART 16 +#define EQ5C_PER_SPI 17 +#define EQ5C_PER_I2C 18 +#define EQ5C_PER_GPIO 19 +#define EQ5C_PER_EMMC 20 +#define EQ5C_PER_OCC_PCI 21 + +#define EQ6LC_PLL_DDR 0 +#define EQ6LC_PLL_CPU 1 +#define EQ6LC_PLL_PER 2 +#define EQ6LC_PLL_VDI 3 + +#define EQ6HC_CENTRAL_PLL_CPU 0 +#define EQ6HC_CENTRAL_CPU_OCC 1 + +#define EQ6HC_WEST_PLL_PER 0 +#define EQ6HC_WEST_PER_OCC 1 +#define EQ6HC_WEST_PER_UART 2 + +#define EQ6HC_SOUTH_PLL_VDI 0 +#define EQ6HC_SOUTH_PLL_PCIE 1 +#define EQ6HC_SOUTH_PLL_PER 2 +#define EQ6HC_SOUTH_PLL_ISP 3 + +#define EQ6HC_SOUTH_DIV_EMMC 4 +#define EQ6HC_SOUTH_DIV_OSPI_REF 5 +#define EQ6HC_SOUTH_DIV_OSPI_SYS 6 +#define EQ6HC_SOUTH_DIV_TSU 7 + +#define EQ6HC_ACC_PLL_XNN 0 +#define EQ6HC_ACC_PLL_VMP 1 +#define EQ6HC_ACC_PLL_PMA 2 +#define EQ6HC_ACC_PLL_MPC 3 +#define EQ6HC_ACC_PLL_NOC 4 #endif diff --git a/include/dt-bindings/clock/qcom,gcc-sm8450.h b/include/dt-bindings/clock/qcom,gcc-sm8450.h index 9679410843a0..7320e63c3a2f 100644 --- a/include/dt-bindings/clock/qcom,gcc-sm8450.h +++ b/include/dt-bindings/clock/qcom,gcc-sm8450.h @@ -194,6 +194,9 @@ #define GCC_VIDEO_AXI0_CLK 182 #define GCC_VIDEO_AXI1_CLK 183 #define GCC_VIDEO_XO_CLK 184 +/* Additional SM8475-specific clocks */ +#define SM8475_GCC_GPLL2 185 +#define SM8475_GCC_GPLL3 186 /* GCC resets */ #define GCC_CAMERA_BCR 0 diff --git a/include/dt-bindings/clock/qcom,ipq5332-gcc.h b/include/dt-bindings/clock/qcom,ipq5332-gcc.h index 8a405a0a96d0..da9b507c30bf 100644 --- a/include/dt-bindings/clock/qcom,ipq5332-gcc.h +++ b/include/dt-bindings/clock/qcom,ipq5332-gcc.h @@ -96,15 +96,7 @@ #define GCC_PCNOC_BFDCD_CLK_SRC 87 #define GCC_PCNOC_LPASS_CLK 88 #define GCC_PRNG_AHB_CLK 89 -#define GCC_Q6_AHB_CLK 90 -#define GCC_Q6_AHB_S_CLK 91 -#define GCC_Q6_AXIM_CLK 92 #define GCC_Q6_AXIM_CLK_SRC 93 -#define GCC_Q6_AXIS_CLK 94 -#define GCC_Q6_TSCTR_1TO2_CLK 95 -#define GCC_Q6SS_ATBM_CLK 96 -#define GCC_Q6SS_PCLKDBG_CLK 97 -#define GCC_Q6SS_TRIG_CLK 98 #define GCC_QDSS_AT_CLK 99 #define GCC_QDSS_AT_CLK_SRC 100 #define GCC_QDSS_CFG_AHB_CLK 101 @@ -134,7 +126,6 @@ #define GCC_SNOC_PCIE3_2LANE_S_CLK 125 #define GCC_SNOC_USB_CLK 126 #define GCC_SYS_NOC_AT_CLK 127 -#define GCC_SYS_NOC_WCSS_AHB_CLK 128 #define GCC_SYSTEM_NOC_BFDCD_CLK_SRC 129 #define GCC_UNIPHY0_AHB_CLK 130 #define GCC_UNIPHY0_SYS_CLK 131 @@ -155,17 +146,6 @@ #define GCC_USB0_PIPE_CLK 146 #define GCC_USB0_SLEEP_CLK 147 #define GCC_WCSS_AHB_CLK_SRC 148 -#define GCC_WCSS_AXIM_CLK 149 -#define GCC_WCSS_AXIS_CLK 150 -#define GCC_WCSS_DBG_IFC_APB_BDG_CLK 151 -#define GCC_WCSS_DBG_IFC_APB_CLK 152 -#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK 153 -#define GCC_WCSS_DBG_IFC_ATB_CLK 154 -#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK 155 -#define GCC_WCSS_DBG_IFC_NTS_CLK 156 -#define GCC_WCSS_ECAHB_CLK 157 -#define GCC_WCSS_MST_ASYNC_BDG_CLK 158 -#define GCC_WCSS_SLV_ASYNC_BDG_CLK 159 #define GCC_XO_CLK 160 #define GCC_XO_CLK_SRC 161 #define GCC_XO_DIV4_CLK 162 diff --git 
a/include/dt-bindings/clock/qcom,ipq5424-gcc.h b/include/dt-bindings/clock/qcom,ipq5424-gcc.h new file mode 100644 index 000000000000..755ce7a71c7c --- /dev/null +++ b/include/dt-bindings/clock/qcom,ipq5424-gcc.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_IPQ5424_H +#define _DT_BINDINGS_CLOCK_IPQ_GCC_IPQ5424_H + +#define GPLL0 0 +#define GPLL4 1 +#define GPLL2 2 +#define GPLL2_OUT_MAIN 3 +#define GCC_SLEEP_CLK_SRC 4 +#define GCC_APSS_DBG_CLK 5 +#define GCC_USB0_EUD_AT_CLK 6 +#define GCC_PCIE0_AXI_M_CLK_SRC 7 +#define GCC_PCIE0_AXI_M_CLK 8 +#define GCC_PCIE1_AXI_M_CLK_SRC 9 +#define GCC_PCIE1_AXI_M_CLK 10 +#define GCC_PCIE2_AXI_M_CLK_SRC 11 +#define GCC_PCIE2_AXI_M_CLK 12 +#define GCC_PCIE3_AXI_M_CLK_SRC 13 +#define GCC_PCIE3_AXI_M_CLK 14 +#define GCC_PCIE0_AXI_S_CLK_SRC 15 +#define GCC_PCIE0_AXI_S_BRIDGE_CLK 16 +#define GCC_PCIE0_AXI_S_CLK 17 +#define GCC_PCIE1_AXI_S_CLK_SRC 18 +#define GCC_PCIE1_AXI_S_BRIDGE_CLK 19 +#define GCC_PCIE1_AXI_S_CLK 20 +#define GCC_PCIE2_AXI_S_CLK_SRC 21 +#define GCC_PCIE2_AXI_S_BRIDGE_CLK 22 +#define GCC_PCIE2_AXI_S_CLK 23 +#define GCC_PCIE3_AXI_S_CLK_SRC 24 +#define GCC_PCIE3_AXI_S_BRIDGE_CLK 25 +#define GCC_PCIE3_AXI_S_CLK 26 +#define GCC_PCIE0_PIPE_CLK_SRC 27 +#define GCC_PCIE0_PIPE_CLK 28 +#define GCC_PCIE1_PIPE_CLK_SRC 29 +#define GCC_PCIE1_PIPE_CLK 30 +#define GCC_PCIE2_PIPE_CLK_SRC 31 +#define GCC_PCIE2_PIPE_CLK 32 +#define GCC_PCIE3_PIPE_CLK_SRC 33 +#define GCC_PCIE3_PIPE_CLK 34 +#define GCC_PCIE_AUX_CLK_SRC 35 +#define GCC_PCIE0_AUX_CLK 36 +#define GCC_PCIE1_AUX_CLK 37 +#define GCC_PCIE2_AUX_CLK 38 +#define GCC_PCIE3_AUX_CLK 39 +#define GCC_PCIE0_AHB_CLK 40 +#define GCC_PCIE1_AHB_CLK 41 +#define GCC_PCIE2_AHB_CLK 42 +#define GCC_PCIE3_AHB_CLK 43 +#define GCC_USB0_AUX_CLK_SRC 44 +#define GCC_USB0_AUX_CLK 45 +#define GCC_USB0_MASTER_CLK 46 +#define GCC_USB0_MOCK_UTMI_CLK_SRC 47 +#define GCC_USB0_MOCK_UTMI_DIV_CLK_SRC 48 +#define GCC_USB0_MOCK_UTMI_CLK 49 +#define GCC_USB0_PIPE_CLK_SRC 50 +#define GCC_USB0_PIPE_CLK 51 +#define GCC_USB0_PHY_CFG_AHB_CLK 52 +#define GCC_USB0_SLEEP_CLK 53 +#define GCC_SDCC1_APPS_CLK_SRC 54 +#define GCC_SDCC1_APPS_CLK 55 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 56 +#define GCC_SDCC1_ICE_CORE_CLK 57 +#define GCC_SDCC1_AHB_CLK 58 +#define GCC_PCNOC_BFDCD_CLK_SRC 59 +#define GCC_NSSCFG_CLK 60 +#define GCC_NSSNOC_NSSCC_CLK 61 +#define GCC_NSSCC_CLK 62 +#define GCC_NSSNOC_PCNOC_1_CLK 63 +#define GCC_QPIC_AHB_CLK 64 +#define GCC_QPIC_CLK 65 +#define GCC_MDIO_AHB_CLK 66 +#define GCC_PRNG_AHB_CLK 67 +#define GCC_UNIPHY0_AHB_CLK 68 +#define GCC_UNIPHY1_AHB_CLK 69 +#define GCC_UNIPHY2_AHB_CLK 70 +#define GCC_CMN_12GPLL_AHB_CLK 71 +#define GCC_SYSTEM_NOC_BFDCD_CLK_SRC 72 +#define GCC_NSSNOC_SNOC_CLK 73 +#define GCC_NSSNOC_SNOC_1_CLK 74 +#define GCC_WCSS_AHB_CLK_SRC 75 +#define GCC_QDSS_AT_CLK_SRC 76 +#define GCC_NSSNOC_ATB_CLK 77 +#define GCC_QDSS_AT_CLK 78 +#define GCC_QDSS_TSCTR_CLK_SRC 79 +#define GCC_NSS_TS_CLK 80 +#define GCC_QPIC_IO_MACRO_CLK_SRC 81 +#define GCC_QPIC_IO_MACRO_CLK 82 +#define GCC_LPASS_AXIM_CLK_SRC 83 +#define GCC_LPASS_CORE_AXIM_CLK 84 +#define GCC_LPASS_SWAY_CLK_SRC 85 +#define GCC_LPASS_SWAY_CLK 86 +#define GCC_CNOC_LPASS_CFG_CLK 87 +#define GCC_SNOC_LPASS_CLK 88 +#define GCC_ADSS_PWM_CLK_SRC 89 +#define GCC_ADSS_PWM_CLK 90 +#define GCC_XO_CLK_SRC 91 +#define GCC_NSSNOC_XO_DCD_CLK 92 
+#define GCC_NSSNOC_QOSGEN_REF_CLK 93 +#define GCC_NSSNOC_TIMEOUT_REF_CLK 94 +#define GCC_UNIPHY0_SYS_CLK 95 +#define GCC_UNIPHY1_SYS_CLK 96 +#define GCC_UNIPHY2_SYS_CLK 97 +#define GCC_CMN_12GPLL_SYS_CLK 98 +#define GCC_UNIPHY_SYS_CLK_SRC 99 +#define GCC_NSS_TS_CLK_SRC 100 +#define GCC_ANOC_PCIE0_1LANE_M_CLK 101 +#define GCC_ANOC_PCIE1_1LANE_M_CLK 102 +#define GCC_ANOC_PCIE2_2LANE_M_CLK 103 +#define GCC_ANOC_PCIE3_2LANE_M_CLK 104 +#define GCC_CNOC_PCIE0_1LANE_S_CLK 105 +#define GCC_CNOC_PCIE1_1LANE_S_CLK 106 +#define GCC_CNOC_PCIE2_2LANE_S_CLK 107 +#define GCC_CNOC_PCIE3_2LANE_S_CLK 108 +#define GCC_CNOC_USB_CLK 109 +#define GCC_CNOC_WCSS_AHB_CLK 110 +#define GCC_QUPV3_AHB_MST_CLK 111 +#define GCC_QUPV3_AHB_SLV_CLK 112 +#define GCC_QUPV3_I2C0_CLK 113 +#define GCC_QUPV3_I2C1_CLK 114 +#define GCC_QUPV3_SPI0_CLK 115 +#define GCC_QUPV3_SPI1_CLK 116 +#define GCC_QUPV3_UART0_CLK 117 +#define GCC_QUPV3_UART1_CLK 118 +#define GCC_QPIC_CLK_SRC 119 +#define GCC_QUPV3_I2C0_CLK_SRC 120 +#define GCC_QUPV3_I2C1_CLK_SRC 121 +#define GCC_QUPV3_I2C0_DIV_CLK_SRC 122 +#define GCC_QUPV3_I2C1_DIV_CLK_SRC 123 +#define GCC_QUPV3_SPI0_CLK_SRC 124 +#define GCC_QUPV3_SPI1_CLK_SRC 125 +#define GCC_QUPV3_UART0_CLK_SRC 126 +#define GCC_QUPV3_UART1_CLK_SRC 127 +#define GCC_USB1_MASTER_CLK 128 +#define GCC_USB1_MOCK_UTMI_CLK_SRC 129 +#define GCC_USB1_MOCK_UTMI_DIV_CLK_SRC 130 +#define GCC_USB1_MOCK_UTMI_CLK 131 +#define GCC_USB1_SLEEP_CLK 132 +#define GCC_USB1_PHY_CFG_AHB_CLK 133 +#define GCC_USB0_MASTER_CLK_SRC 134 +#define GCC_QDSS_DAP_CLK 135 +#define GCC_PCIE0_RCHNG_CLK_SRC 136 +#define GCC_PCIE0_RCHNG_CLK 137 +#define GCC_PCIE1_RCHNG_CLK_SRC 138 +#define GCC_PCIE1_RCHNG_CLK 139 +#define GCC_PCIE2_RCHNG_CLK_SRC 140 +#define GCC_PCIE2_RCHNG_CLK 141 +#define GCC_PCIE3_RCHNG_CLK_SRC 142 +#define GCC_PCIE3_RCHNG_CLK 143 +#define GCC_IM_SLEEP_CLK 144 + +#endif diff --git a/include/dt-bindings/clock/qcom,ipq9574-gcc.h b/include/dt-bindings/clock/qcom,ipq9574-gcc.h index 52123c5a09fa..f238aa4794a8 100644 --- a/include/dt-bindings/clock/qcom,ipq9574-gcc.h +++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h @@ -132,16 +132,8 @@ #define GCC_NSSNOC_SNOC_1_CLK 123 #define GCC_QDSS_ETR_USB_CLK 124 #define WCSS_AHB_CLK_SRC 125 -#define GCC_Q6_AHB_CLK 126 -#define GCC_Q6_AHB_S_CLK 127 -#define GCC_WCSS_ECAHB_CLK 128 -#define GCC_WCSS_ACMT_CLK 129 -#define GCC_SYS_NOC_WCSS_AHB_CLK 130 #define WCSS_AXI_M_CLK_SRC 131 -#define GCC_ANOC_WCSS_AXI_M_CLK 132 #define QDSS_AT_CLK_SRC 133 -#define GCC_Q6SS_ATBM_CLK 134 -#define GCC_WCSS_DBG_IFC_ATB_CLK 135 #define GCC_NSSNOC_ATB_CLK 136 #define GCC_QDSS_AT_CLK 137 #define GCC_SYS_NOC_AT_CLK 138 @@ -154,27 +146,18 @@ #define QDSS_TRACECLKIN_CLK_SRC 145 #define GCC_QDSS_TRACECLKIN_CLK 146 #define QDSS_TSCTR_CLK_SRC 147 -#define GCC_Q6_TSCTR_1TO2_CLK 148 -#define GCC_WCSS_DBG_IFC_NTS_CLK 149 #define GCC_QDSS_TSCTR_DIV2_CLK 150 #define GCC_QDSS_TS_CLK 151 #define GCC_QDSS_TSCTR_DIV4_CLK 152 #define GCC_NSS_TS_CLK 153 #define GCC_QDSS_TSCTR_DIV8_CLK 154 #define GCC_QDSS_TSCTR_DIV16_CLK 155 -#define GCC_Q6SS_PCLKDBG_CLK 156 -#define GCC_Q6SS_TRIG_CLK 157 -#define GCC_WCSS_DBG_IFC_APB_CLK 158 -#define GCC_WCSS_DBG_IFC_DAPBUS_CLK 159 #define GCC_QDSS_DAP_CLK 160 #define GCC_QDSS_APB2JTAG_CLK 161 #define GCC_QDSS_TSCTR_DIV3_CLK 162 #define QPIC_IO_MACRO_CLK_SRC 163 #define GCC_QPIC_IO_MACRO_CLK 164 #define Q6_AXI_CLK_SRC 165 -#define GCC_Q6_AXIM_CLK 166 -#define GCC_WCSS_Q6_TBU_CLK 167 -#define GCC_MEM_NOC_Q6_AXI_CLK 168 #define Q6_AXIM2_CLK_SRC 169 #define NSSNOC_MEMNOC_BFDCD_CLK_SRC 170 #define 
GCC_NSSNOC_MEMNOC_CLK 171 @@ -199,7 +182,6 @@ #define GCC_UNIPHY2_SYS_CLK 190 #define GCC_CMN_12GPLL_SYS_CLK 191 #define GCC_NSSNOC_XO_DCD_CLK 192 -#define GCC_Q6SS_BOOT_CLK 193 #define UNIPHY_SYS_CLK_SRC 194 #define NSS_TS_CLK_SRC 195 #define GCC_ANOC_PCIE0_1LANE_M_CLK 196 diff --git a/include/dt-bindings/clock/qcom,qcs8300-gcc.h b/include/dt-bindings/clock/qcom,qcs8300-gcc.h new file mode 100644 index 000000000000..a0083b1d2126 --- /dev/null +++ b/include/dt-bindings/clock/qcom,qcs8300-gcc.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QCS8300_H +#define _DT_BINDINGS_CLK_QCOM_GCC_QCS8300_H + +/* GCC clocks */ +#define GCC_GPLL0 0 +#define GCC_GPLL0_OUT_EVEN 1 +#define GCC_GPLL1 2 +#define GCC_GPLL4 3 +#define GCC_GPLL7 4 +#define GCC_GPLL9 5 +#define GCC_AGGRE_NOC_QUPV3_AXI_CLK 6 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 7 +#define GCC_AGGRE_USB2_PRIM_AXI_CLK 8 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 9 +#define GCC_AHB2PHY0_CLK 10 +#define GCC_AHB2PHY2_CLK 11 +#define GCC_AHB2PHY3_CLK 12 +#define GCC_BOOT_ROM_AHB_CLK 13 +#define GCC_CAMERA_AHB_CLK 14 +#define GCC_CAMERA_HF_AXI_CLK 15 +#define GCC_CAMERA_SF_AXI_CLK 16 +#define GCC_CAMERA_THROTTLE_XO_CLK 17 +#define GCC_CAMERA_XO_CLK 18 +#define GCC_CFG_NOC_USB2_PRIM_AXI_CLK 19 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 20 +#define GCC_DDRSS_GPU_AXI_CLK 21 +#define GCC_DISP_AHB_CLK 22 +#define GCC_DISP_HF_AXI_CLK 23 +#define GCC_DISP_XO_CLK 24 +#define GCC_EDP_REF_CLKREF_EN 25 +#define GCC_EMAC0_AXI_CLK 26 +#define GCC_EMAC0_PHY_AUX_CLK 27 +#define GCC_EMAC0_PHY_AUX_CLK_SRC 28 +#define GCC_EMAC0_PTP_CLK 29 +#define GCC_EMAC0_PTP_CLK_SRC 30 +#define GCC_EMAC0_RGMII_CLK 31 +#define GCC_EMAC0_RGMII_CLK_SRC 32 +#define GCC_EMAC0_SLV_AHB_CLK 33 +#define GCC_GP1_CLK 34 +#define GCC_GP1_CLK_SRC 35 +#define GCC_GP2_CLK 36 +#define GCC_GP2_CLK_SRC 37 +#define GCC_GP3_CLK 38 +#define GCC_GP3_CLK_SRC 39 +#define GCC_GP4_CLK 40 +#define GCC_GP4_CLK_SRC 41 +#define GCC_GP5_CLK 42 +#define GCC_GP5_CLK_SRC 43 +#define GCC_GPU_CFG_AHB_CLK 44 +#define GCC_GPU_GPLL0_CLK_SRC 45 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 46 +#define GCC_GPU_MEMNOC_GFX_CENTER_PIPELINE_CLK 47 +#define GCC_GPU_MEMNOC_GFX_CLK 48 +#define GCC_GPU_SNOC_DVM_GFX_CLK 49 +#define GCC_GPU_TCU_THROTTLE_AHB_CLK 50 +#define GCC_GPU_TCU_THROTTLE_CLK 51 +#define GCC_PCIE_0_AUX_CLK 52 +#define GCC_PCIE_0_AUX_CLK_SRC 53 +#define GCC_PCIE_0_CFG_AHB_CLK 54 +#define GCC_PCIE_0_MSTR_AXI_CLK 55 +#define GCC_PCIE_0_PHY_AUX_CLK 56 +#define GCC_PCIE_0_PHY_AUX_CLK_SRC 57 +#define GCC_PCIE_0_PHY_RCHNG_CLK 58 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 59 +#define GCC_PCIE_0_PIPE_CLK 60 +#define GCC_PCIE_0_PIPE_CLK_SRC 61 +#define GCC_PCIE_0_PIPE_DIV_CLK_SRC 62 +#define GCC_PCIE_0_PIPEDIV2_CLK 63 +#define GCC_PCIE_0_SLV_AXI_CLK 64 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 65 +#define GCC_PCIE_1_AUX_CLK 66 +#define GCC_PCIE_1_AUX_CLK_SRC 67 +#define GCC_PCIE_1_CFG_AHB_CLK 68 +#define GCC_PCIE_1_MSTR_AXI_CLK 69 +#define GCC_PCIE_1_PHY_AUX_CLK 70 +#define GCC_PCIE_1_PHY_AUX_CLK_SRC 71 +#define GCC_PCIE_1_PHY_RCHNG_CLK 72 +#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 73 +#define GCC_PCIE_1_PIPE_CLK 74 +#define GCC_PCIE_1_PIPE_CLK_SRC 75 +#define GCC_PCIE_1_PIPE_DIV_CLK_SRC 76 +#define GCC_PCIE_1_PIPEDIV2_CLK 77 +#define GCC_PCIE_1_SLV_AXI_CLK 78 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 79 +#define GCC_PCIE_CLKREF_EN 80 +#define GCC_PCIE_THROTTLE_CFG_CLK 81 +#define GCC_PDM2_CLK 82 +#define 
GCC_PDM2_CLK_SRC 83 +#define GCC_PDM_AHB_CLK 84 +#define GCC_PDM_XO4_CLK 85 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 86 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 87 +#define GCC_QMIP_DISP_AHB_CLK 88 +#define GCC_QMIP_DISP_ROT_AHB_CLK 89 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 90 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 91 +#define GCC_QMIP_VIDEO_VCPU_AHB_CLK 92 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 93 +#define GCC_QUPV3_WRAP0_CORE_CLK 94 +#define GCC_QUPV3_WRAP0_S0_CLK 95 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 96 +#define GCC_QUPV3_WRAP0_S1_CLK 97 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 98 +#define GCC_QUPV3_WRAP0_S2_CLK 99 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 100 +#define GCC_QUPV3_WRAP0_S3_CLK 101 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 102 +#define GCC_QUPV3_WRAP0_S4_CLK 103 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 104 +#define GCC_QUPV3_WRAP0_S5_CLK 105 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 106 +#define GCC_QUPV3_WRAP0_S6_CLK 107 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 108 +#define GCC_QUPV3_WRAP0_S7_CLK 109 +#define GCC_QUPV3_WRAP0_S7_CLK_SRC 110 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 111 +#define GCC_QUPV3_WRAP1_CORE_CLK 112 +#define GCC_QUPV3_WRAP1_S0_CLK 113 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 114 +#define GCC_QUPV3_WRAP1_S1_CLK 115 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 116 +#define GCC_QUPV3_WRAP1_S2_CLK 117 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 118 +#define GCC_QUPV3_WRAP1_S3_CLK 119 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 120 +#define GCC_QUPV3_WRAP1_S4_CLK 121 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 122 +#define GCC_QUPV3_WRAP1_S5_CLK 123 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 124 +#define GCC_QUPV3_WRAP1_S6_CLK 125 +#define GCC_QUPV3_WRAP1_S6_CLK_SRC 126 +#define GCC_QUPV3_WRAP1_S7_CLK 127 +#define GCC_QUPV3_WRAP1_S7_CLK_SRC 128 +#define GCC_QUPV3_WRAP3_CORE_2X_CLK 129 +#define GCC_QUPV3_WRAP3_CORE_CLK 130 +#define GCC_QUPV3_WRAP3_QSPI_CLK 131 +#define GCC_QUPV3_WRAP3_S0_CLK 132 +#define GCC_QUPV3_WRAP3_S0_CLK_SRC 133 +#define GCC_QUPV3_WRAP3_S0_DIV_CLK_SRC 134 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 135 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 136 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 137 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 138 +#define GCC_QUPV3_WRAP_3_M_AHB_CLK 139 +#define GCC_QUPV3_WRAP_3_S_AHB_CLK 140 +#define GCC_SDCC1_AHB_CLK 141 +#define GCC_SDCC1_APPS_CLK 142 +#define GCC_SDCC1_APPS_CLK_SRC 143 +#define GCC_SDCC1_ICE_CORE_CLK 144 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 145 +#define GCC_SGMI_CLKREF_EN 146 +#define GCC_UFS_PHY_AHB_CLK 147 +#define GCC_UFS_PHY_AXI_CLK 148 +#define GCC_UFS_PHY_AXI_CLK_SRC 149 +#define GCC_UFS_PHY_ICE_CORE_CLK 150 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 151 +#define GCC_UFS_PHY_PHY_AUX_CLK 152 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 153 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 154 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 155 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 156 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 157 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 158 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 159 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 160 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 161 +#define GCC_USB20_MASTER_CLK 162 +#define GCC_USB20_MASTER_CLK_SRC 163 +#define GCC_USB20_MOCK_UTMI_CLK 164 +#define GCC_USB20_MOCK_UTMI_CLK_SRC 165 +#define GCC_USB20_MOCK_UTMI_POSTDIV_CLK_SRC 166 +#define GCC_USB20_SLEEP_CLK 167 +#define GCC_USB30_PRIM_MASTER_CLK 168 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 169 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 170 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 171 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 172 +#define GCC_USB30_PRIM_SLEEP_CLK 173 +#define GCC_USB3_PRIM_PHY_AUX_CLK 174 +#define 
GCC_USB3_PRIM_PHY_AUX_CLK_SRC 175 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 176 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 177 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 178 +#define GCC_USB_CLKREF_EN 179 +#define GCC_VIDEO_AHB_CLK 180 +#define GCC_VIDEO_AXI0_CLK 181 +#define GCC_VIDEO_AXI1_CLK 182 +#define GCC_VIDEO_XO_CLK 183 + +/* GCC power domains */ +#define GCC_EMAC0_GDSC 0 +#define GCC_PCIE_0_GDSC 1 +#define GCC_PCIE_1_GDSC 2 +#define GCC_UFS_PHY_GDSC 3 +#define GCC_USB20_PRIM_GDSC 4 +#define GCC_USB30_PRIM_GDSC 5 + +/* GCC resets */ +#define GCC_EMAC0_BCR 0 +#define GCC_PCIE_0_BCR 1 +#define GCC_PCIE_0_LINK_DOWN_BCR 2 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 3 +#define GCC_PCIE_0_PHY_BCR 4 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 5 +#define GCC_PCIE_1_BCR 6 +#define GCC_PCIE_1_LINK_DOWN_BCR 7 +#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 8 +#define GCC_PCIE_1_PHY_BCR 9 +#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 10 +#define GCC_SDCC1_BCR 11 +#define GCC_UFS_PHY_BCR 12 +#define GCC_USB20_PRIM_BCR 13 +#define GCC_USB2_PHY_PRIM_BCR 14 +#define GCC_USB2_PHY_SEC_BCR 15 +#define GCC_USB30_PRIM_BCR 16 +#define GCC_USB3_DP_PHY_PRIM_BCR 17 +#define GCC_USB3_PHY_PRIM_BCR 18 +#define GCC_USB3_PHY_TERT_BCR 19 +#define GCC_USB3_UNIPHY_MP0_BCR 20 +#define GCC_USB3_UNIPHY_MP1_BCR 21 +#define GCC_USB3PHY_PHY_PRIM_BCR 22 +#define GCC_USB3UNIPHY_PHY_MP0_BCR 23 +#define GCC_USB3UNIPHY_PHY_MP1_BCR 24 +#define GCC_USB_PHY_CFG_AHB2PHY_BCR 25 +#define GCC_VIDEO_BCR 26 +#define GCC_VIDEO_AXI0_CLK_ARES 27 +#define GCC_VIDEO_AXI1_CLK_ARES 28 + +#endif diff --git a/include/dt-bindings/clock/qcom,sa8775p-camcc.h b/include/dt-bindings/clock/qcom,sa8775p-camcc.h new file mode 100644 index 000000000000..38531acd699f --- /dev/null +++ b/include/dt-bindings/clock/qcom,sa8775p-camcc.h @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_CAM_CC_H +#define _DT_BINDINGS_CLK_QCOM_SA8775P_CAM_CC_H + +/* CAM_CC clocks */ +#define CAM_CC_CAMNOC_AXI_CLK 0 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 1 +#define CAM_CC_CAMNOC_DCD_XO_CLK 2 +#define CAM_CC_CAMNOC_XO_CLK 3 +#define CAM_CC_CCI_0_CLK 4 +#define CAM_CC_CCI_0_CLK_SRC 5 +#define CAM_CC_CCI_1_CLK 6 +#define CAM_CC_CCI_1_CLK_SRC 7 +#define CAM_CC_CCI_2_CLK 8 +#define CAM_CC_CCI_2_CLK_SRC 9 +#define CAM_CC_CCI_3_CLK 10 +#define CAM_CC_CCI_3_CLK_SRC 11 +#define CAM_CC_CORE_AHB_CLK 12 +#define CAM_CC_CPAS_AHB_CLK 13 +#define CAM_CC_CPAS_FAST_AHB_CLK 14 +#define CAM_CC_CPAS_IFE_0_CLK 15 +#define CAM_CC_CPAS_IFE_1_CLK 16 +#define CAM_CC_CPAS_IFE_LITE_CLK 17 +#define CAM_CC_CPAS_IPE_CLK 18 +#define CAM_CC_CPAS_SFE_LITE_0_CLK 19 +#define CAM_CC_CPAS_SFE_LITE_1_CLK 20 +#define CAM_CC_CPHY_RX_CLK_SRC 21 +#define CAM_CC_CSI0PHYTIMER_CLK 22 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 23 +#define CAM_CC_CSI1PHYTIMER_CLK 24 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 25 +#define CAM_CC_CSI2PHYTIMER_CLK 26 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 27 +#define CAM_CC_CSI3PHYTIMER_CLK 28 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 29 +#define CAM_CC_CSID_CLK 30 +#define CAM_CC_CSID_CLK_SRC 31 +#define CAM_CC_CSID_CSIPHY_RX_CLK 32 +#define CAM_CC_CSIPHY0_CLK 33 +#define CAM_CC_CSIPHY1_CLK 34 +#define CAM_CC_CSIPHY2_CLK 35 +#define CAM_CC_CSIPHY3_CLK 36 +#define CAM_CC_FAST_AHB_CLK_SRC 37 +#define CAM_CC_GDSC_CLK 38 +#define CAM_CC_ICP_AHB_CLK 39 +#define CAM_CC_ICP_CLK 40 +#define CAM_CC_ICP_CLK_SRC 41 +#define CAM_CC_IFE_0_CLK 42 +#define CAM_CC_IFE_0_CLK_SRC 43 +#define CAM_CC_IFE_0_FAST_AHB_CLK 44 +#define CAM_CC_IFE_1_CLK 45 +#define CAM_CC_IFE_1_CLK_SRC 46 +#define CAM_CC_IFE_1_FAST_AHB_CLK 47 +#define CAM_CC_IFE_LITE_AHB_CLK 48 +#define CAM_CC_IFE_LITE_CLK 49 +#define CAM_CC_IFE_LITE_CLK_SRC 50 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 51 +#define CAM_CC_IFE_LITE_CSID_CLK 52 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 53 +#define CAM_CC_IPE_AHB_CLK 54 +#define CAM_CC_IPE_CLK 55 +#define CAM_CC_IPE_CLK_SRC 56 +#define CAM_CC_IPE_FAST_AHB_CLK 57 +#define CAM_CC_MCLK0_CLK 58 +#define CAM_CC_MCLK0_CLK_SRC 59 +#define CAM_CC_MCLK1_CLK 60 +#define CAM_CC_MCLK1_CLK_SRC 61 +#define CAM_CC_MCLK2_CLK 62 +#define CAM_CC_MCLK2_CLK_SRC 63 +#define CAM_CC_MCLK3_CLK 64 +#define CAM_CC_MCLK3_CLK_SRC 65 +#define CAM_CC_PLL0 66 +#define CAM_CC_PLL0_OUT_EVEN 67 +#define CAM_CC_PLL0_OUT_ODD 68 +#define CAM_CC_PLL2 69 +#define CAM_CC_PLL3 70 +#define CAM_CC_PLL3_OUT_EVEN 71 +#define CAM_CC_PLL4 72 +#define CAM_CC_PLL4_OUT_EVEN 73 +#define CAM_CC_PLL5 74 +#define CAM_CC_PLL5_OUT_EVEN 75 +#define CAM_CC_SFE_LITE_0_CLK 76 +#define CAM_CC_SFE_LITE_0_FAST_AHB_CLK 77 +#define CAM_CC_SFE_LITE_1_CLK 78 +#define CAM_CC_SFE_LITE_1_FAST_AHB_CLK 79 +#define CAM_CC_SLEEP_CLK 80 +#define CAM_CC_SLEEP_CLK_SRC 81 +#define CAM_CC_SLOW_AHB_CLK_SRC 82 +#define CAM_CC_SM_OBS_CLK 83 +#define CAM_CC_XO_CLK_SRC 84 +#define CAM_CC_QDSS_DEBUG_XO_CLK 85 + +/* CAM_CC power domains */ +#define CAM_CC_TITAN_TOP_GDSC 0 + +/* CAM_CC resets */ +#define CAM_CC_ICP_BCR 0 +#define CAM_CC_IFE_0_BCR 1 +#define CAM_CC_IFE_1_BCR 2 +#define CAM_CC_IPE_0_BCR 3 +#define CAM_CC_SFE_LITE_0_BCR 4 +#define CAM_CC_SFE_LITE_1_BCR 5 + +#endif diff --git a/include/dt-bindings/clock/qcom,sa8775p-dispcc.h b/include/dt-bindings/clock/qcom,sa8775p-dispcc.h new file mode 100644 index 000000000000..e2049e510658 --- /dev/null +++ b/include/dt-bindings/clock/qcom,sa8775p-dispcc.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + 
* Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_DISP_CC_H +#define _DT_BINDINGS_CLK_QCOM_SA8775P_DISP_CC_H + +/* DISP_CC_0/1 clocks */ +#define MDSS_DISP_CC_MDSS_AHB1_CLK 0 +#define MDSS_DISP_CC_MDSS_AHB_CLK 1 +#define MDSS_DISP_CC_MDSS_AHB_CLK_SRC 2 +#define MDSS_DISP_CC_MDSS_BYTE0_CLK 3 +#define MDSS_DISP_CC_MDSS_BYTE0_CLK_SRC 4 +#define MDSS_DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 5 +#define MDSS_DISP_CC_MDSS_BYTE0_INTF_CLK 6 +#define MDSS_DISP_CC_MDSS_BYTE1_CLK 7 +#define MDSS_DISP_CC_MDSS_BYTE1_CLK_SRC 8 +#define MDSS_DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 9 +#define MDSS_DISP_CC_MDSS_BYTE1_INTF_CLK 10 +#define MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK 11 +#define MDSS_DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 12 +#define MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK 13 +#define MDSS_DISP_CC_MDSS_DPTX0_CRYPTO_CLK_SRC 14 +#define MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK 15 +#define MDSS_DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 16 +#define MDSS_DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 17 +#define MDSS_DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 18 +#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK 19 +#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 20 +#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK 21 +#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 22 +#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK 23 +#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL2_CLK_SRC 24 +#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK 25 +#define MDSS_DISP_CC_MDSS_DPTX0_PIXEL3_CLK_SRC 26 +#define MDSS_DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 27 +#define MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK 28 +#define MDSS_DISP_CC_MDSS_DPTX1_AUX_CLK_SRC 29 +#define MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK 30 +#define MDSS_DISP_CC_MDSS_DPTX1_CRYPTO_CLK_SRC 31 +#define MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK 32 +#define MDSS_DISP_CC_MDSS_DPTX1_LINK_CLK_SRC 33 +#define MDSS_DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC 34 +#define MDSS_DISP_CC_MDSS_DPTX1_LINK_INTF_CLK 35 +#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK 36 +#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC 37 +#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK 38 +#define MDSS_DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC 39 +#define MDSS_DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK 40 +#define MDSS_DISP_CC_MDSS_ESC0_CLK 41 +#define MDSS_DISP_CC_MDSS_ESC0_CLK_SRC 42 +#define MDSS_DISP_CC_MDSS_ESC1_CLK 43 +#define MDSS_DISP_CC_MDSS_ESC1_CLK_SRC 44 +#define MDSS_DISP_CC_MDSS_MDP1_CLK 45 +#define MDSS_DISP_CC_MDSS_MDP_CLK 46 +#define MDSS_DISP_CC_MDSS_MDP_CLK_SRC 47 +#define MDSS_DISP_CC_MDSS_MDP_LUT1_CLK 48 +#define MDSS_DISP_CC_MDSS_MDP_LUT_CLK 49 +#define MDSS_DISP_CC_MDSS_NON_GDSC_AHB_CLK 50 +#define MDSS_DISP_CC_MDSS_PCLK0_CLK 51 +#define MDSS_DISP_CC_MDSS_PCLK0_CLK_SRC 52 +#define MDSS_DISP_CC_MDSS_PCLK1_CLK 53 +#define MDSS_DISP_CC_MDSS_PCLK1_CLK_SRC 54 +#define MDSS_DISP_CC_MDSS_PLL_LOCK_MONITOR_CLK 55 +#define MDSS_DISP_CC_MDSS_RSCC_AHB_CLK 56 +#define MDSS_DISP_CC_MDSS_RSCC_VSYNC_CLK 57 +#define MDSS_DISP_CC_MDSS_VSYNC1_CLK 58 +#define MDSS_DISP_CC_MDSS_VSYNC_CLK 59 +#define MDSS_DISP_CC_MDSS_VSYNC_CLK_SRC 60 +#define MDSS_DISP_CC_PLL0 61 +#define MDSS_DISP_CC_PLL1 62 +#define MDSS_DISP_CC_SLEEP_CLK 63 +#define MDSS_DISP_CC_SLEEP_CLK_SRC 64 +#define MDSS_DISP_CC_SM_OBS_CLK 65 +#define MDSS_DISP_CC_XO_CLK 66 +#define MDSS_DISP_CC_XO_CLK_SRC 67 + +/* DISP_CC_0/1 power domains */ +#define MDSS_DISP_CC_MDSS_CORE_GDSC 0 +#define MDSS_DISP_CC_MDSS_CORE_INT2_GDSC 1 + +/* DISP_CC_0/1 resets */ +#define MDSS_DISP_CC_MDSS_CORE_BCR 0 +#define MDSS_DISP_CC_MDSS_RSCC_BCR 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,sa8775p-videocc.h 
b/include/dt-bindings/clock/qcom,sa8775p-videocc.h new file mode 100644 index 000000000000..e6325f68c317 --- /dev/null +++ b/include/dt-bindings/clock/qcom,sa8775p-videocc.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_SA8775P_VIDEO_CC_H +#define _DT_BINDINGS_CLK_QCOM_SA8775P_VIDEO_CC_H + +/* VIDEO_CC clocks */ +#define VIDEO_CC_AHB_CLK 0 +#define VIDEO_CC_AHB_CLK_SRC 1 +#define VIDEO_CC_MVS0_CLK 2 +#define VIDEO_CC_MVS0_CLK_SRC 3 +#define VIDEO_CC_MVS0_DIV_CLK_SRC 4 +#define VIDEO_CC_MVS0C_CLK 5 +#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 6 +#define VIDEO_CC_MVS1_CLK 7 +#define VIDEO_CC_MVS1_CLK_SRC 8 +#define VIDEO_CC_MVS1_DIV_CLK_SRC 9 +#define VIDEO_CC_MVS1C_CLK 10 +#define VIDEO_CC_MVS1C_DIV2_DIV_CLK_SRC 11 +#define VIDEO_CC_PLL_LOCK_MONITOR_CLK 12 +#define VIDEO_CC_SLEEP_CLK 13 +#define VIDEO_CC_SLEEP_CLK_SRC 14 +#define VIDEO_CC_SM_DIV_CLK_SRC 15 +#define VIDEO_CC_SM_OBS_CLK 16 +#define VIDEO_CC_XO_CLK 17 +#define VIDEO_CC_XO_CLK_SRC 18 +#define VIDEO_PLL0 19 +#define VIDEO_PLL1 20 + +/* VIDEO_CC power domains */ +#define VIDEO_CC_MVS0C_GDSC 0 +#define VIDEO_CC_MVS0_GDSC 1 +#define VIDEO_CC_MVS1C_GDSC 2 +#define VIDEO_CC_MVS1_GDSC 3 + +/* VIDEO_CC resets */ +#define VIDEO_CC_INTERFACE_BCR 0 +#define VIDEO_CC_MVS0_BCR 1 +#define VIDEO_CC_MVS0C_CLK_ARES 2 +#define VIDEO_CC_MVS0C_BCR 3 +#define VIDEO_CC_MVS1_BCR 4 +#define VIDEO_CC_MVS1C_CLK_ARES 5 +#define VIDEO_CC_MVS1C_BCR 6 + +#endif diff --git a/include/dt-bindings/clock/qcom,sar2130p-gcc.h b/include/dt-bindings/clock/qcom,sar2130p-gcc.h new file mode 100644 index 000000000000..69d2dd2538a6 --- /dev/null +++ b/include/dt-bindings/clock/qcom,sar2130p-gcc.h @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ +/* + * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SAR2130P_H +#define _DT_BINDINGS_CLK_QCOM_GCC_SAR2130P_H + +/* GCC clocks */ +#define GCC_GPLL0 0 +#define GCC_GPLL0_OUT_EVEN 1 +#define GCC_GPLL1 2 +#define GCC_GPLL9 3 +#define GCC_GPLL9_OUT_EVEN 4 +#define GCC_AGGRE_NOC_PCIE_1_AXI_CLK 5 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 6 +#define GCC_BOOT_ROM_AHB_CLK 7 +#define GCC_CAMERA_AHB_CLK 8 +#define GCC_CAMERA_HF_AXI_CLK 9 +#define GCC_CAMERA_SF_AXI_CLK 10 +#define GCC_CAMERA_XO_CLK 11 +#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 12 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 13 +#define GCC_DDRSS_GPU_AXI_CLK 14 +#define GCC_DDRSS_PCIE_SF_CLK 15 +#define GCC_DISP_AHB_CLK 16 +#define GCC_DISP_HF_AXI_CLK 17 +#define GCC_GP1_CLK 18 +#define GCC_GP1_CLK_SRC 19 +#define GCC_GP2_CLK 20 +#define GCC_GP2_CLK_SRC 21 +#define GCC_GP3_CLK 22 +#define GCC_GP3_CLK_SRC 23 +#define GCC_GPU_CFG_AHB_CLK 24 +#define GCC_GPU_GPLL0_CLK_SRC 25 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 26 +#define GCC_GPU_MEMNOC_GFX_CLK 27 +#define GCC_GPU_SNOC_DVM_GFX_CLK 28 +#define GCC_IRIS_SS_HF_AXI1_CLK 29 +#define GCC_IRIS_SS_SPD_AXI1_CLK 30 +#define GCC_PCIE_0_AUX_CLK 31 +#define GCC_PCIE_0_AUX_CLK_SRC 32 +#define GCC_PCIE_0_CFG_AHB_CLK 33 +#define GCC_PCIE_0_MSTR_AXI_CLK 34 +#define GCC_PCIE_0_PHY_RCHNG_CLK 35 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 36 +#define GCC_PCIE_0_PIPE_CLK 37 +#define GCC_PCIE_0_PIPE_CLK_SRC 38 +#define GCC_PCIE_0_SLV_AXI_CLK 39 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 40 +#define GCC_PCIE_1_AUX_CLK 41 +#define GCC_PCIE_1_AUX_CLK_SRC 42 +#define GCC_PCIE_1_CFG_AHB_CLK 43 +#define GCC_PCIE_1_MSTR_AXI_CLK 44 +#define GCC_PCIE_1_PHY_RCHNG_CLK 45 +#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 46 +#define GCC_PCIE_1_PIPE_CLK 47 +#define GCC_PCIE_1_PIPE_CLK_SRC 48 +#define GCC_PCIE_1_SLV_AXI_CLK 49 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 50 +#define GCC_PDM2_CLK 51 +#define GCC_PDM2_CLK_SRC 52 +#define GCC_PDM_AHB_CLK 53 +#define GCC_PDM_XO4_CLK 54 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 55 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 56 +#define GCC_QMIP_GPU_AHB_CLK 57 +#define GCC_QMIP_PCIE_AHB_CLK 58 +#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 59 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 60 +#define GCC_QMIP_VIDEO_LSR_AHB_CLK 61 +#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 62 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 63 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 64 +#define GCC_QUPV3_WRAP0_CORE_CLK 65 +#define GCC_QUPV3_WRAP0_S0_CLK 66 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 67 +#define GCC_QUPV3_WRAP0_S1_CLK 68 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 69 +#define GCC_QUPV3_WRAP0_S2_CLK 70 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 71 +#define GCC_QUPV3_WRAP0_S3_CLK 72 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 73 +#define GCC_QUPV3_WRAP0_S4_CLK 74 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 75 +#define GCC_QUPV3_WRAP0_S5_CLK 76 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 77 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 78 +#define GCC_QUPV3_WRAP1_CORE_CLK 79 +#define GCC_QUPV3_WRAP1_S0_CLK 80 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 81 +#define GCC_QUPV3_WRAP1_S1_CLK 82 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 83 +#define GCC_QUPV3_WRAP1_S2_CLK 84 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 85 +#define GCC_QUPV3_WRAP1_S3_CLK 86 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 87 +#define GCC_QUPV3_WRAP1_S4_CLK 88 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 89 +#define GCC_QUPV3_WRAP1_S5_CLK 90 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 91 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 92 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 93 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 94 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 95 +#define GCC_SDCC1_AHB_CLK 96 +#define GCC_SDCC1_APPS_CLK 97 +#define 
GCC_SDCC1_APPS_CLK_SRC 98 +#define GCC_SDCC1_ICE_CORE_CLK 99 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 100 +#define GCC_USB30_PRIM_MASTER_CLK 101 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 102 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 103 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 104 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 105 +#define GCC_USB30_PRIM_SLEEP_CLK 106 +#define GCC_USB3_PRIM_PHY_AUX_CLK 107 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 108 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 109 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 110 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 111 +#define GCC_VIDEO_AHB_CLK 112 +#define GCC_VIDEO_AXI0_CLK 113 +#define GCC_VIDEO_AXI1_CLK 114 +#define GCC_VIDEO_XO_CLK 115 +#define GCC_GPLL4 116 +#define GCC_GPLL5 117 +#define GCC_GPLL7 118 +#define GCC_DDRSS_SPAD_CLK 119 +#define GCC_DDRSS_SPAD_CLK_SRC 120 +#define GCC_VIDEO_AXI0_SREG 121 +#define GCC_VIDEO_AXI1_SREG 122 +#define GCC_IRIS_SS_HF_AXI1_SREG 123 +#define GCC_IRIS_SS_SPD_AXI1_SREG 124 + +/* GCC resets */ +#define GCC_CAMERA_BCR 0 +#define GCC_DISPLAY_BCR 1 +#define GCC_GPU_BCR 2 +#define GCC_PCIE_0_BCR 3 +#define GCC_PCIE_0_LINK_DOWN_BCR 4 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5 +#define GCC_PCIE_0_PHY_BCR 6 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7 +#define GCC_PCIE_1_BCR 8 +#define GCC_PCIE_1_LINK_DOWN_BCR 9 +#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10 +#define GCC_PCIE_1_PHY_BCR 11 +#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12 +#define GCC_PCIE_PHY_BCR 13 +#define GCC_PCIE_PHY_CFG_AHB_BCR 14 +#define GCC_PCIE_PHY_COM_BCR 15 +#define GCC_PDM_BCR 16 +#define GCC_QUPV3_WRAPPER_0_BCR 17 +#define GCC_QUPV3_WRAPPER_1_BCR 18 +#define GCC_QUSB2PHY_PRIM_BCR 19 +#define GCC_QUSB2PHY_SEC_BCR 20 +#define GCC_SDCC1_BCR 21 +#define GCC_USB30_PRIM_BCR 22 +#define GCC_USB3_DP_PHY_PRIM_BCR 23 +#define GCC_USB3_DP_PHY_SEC_BCR 24 +#define GCC_USB3_PHY_PRIM_BCR 25 +#define GCC_USB3_PHY_SEC_BCR 26 +#define GCC_USB3PHY_PHY_PRIM_BCR 27 +#define GCC_USB3PHY_PHY_SEC_BCR 28 +#define GCC_VIDEO_AXI0_CLK_ARES 29 +#define GCC_VIDEO_AXI1_CLK_ARES 30 +#define GCC_VIDEO_BCR 31 +#define GCC_IRIS_SS_HF_AXI_CLK_ARES 32 +#define GCC_IRIS_SS_SPD_AXI_CLK_ARES 33 +#define GCC_DDRSS_SPAD_CLK_ARES 34 + +/* GCC power domains */ +#define PCIE_0_GDSC 0 +#define PCIE_0_PHY_GDSC 1 +#define PCIE_1_GDSC 2 +#define PCIE_1_PHY_GDSC 3 +#define USB30_PRIM_GDSC 4 +#define USB3_PHY_GDSC 5 +#define HLOS1_VOTE_MM_SNOC_MMU_TBU_HF0_GDSC 6 +#define HLOS1_VOTE_MM_SNOC_MMU_TBU_SF0_GDSC 7 +#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 8 +#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 9 + +#endif diff --git a/include/dt-bindings/clock/qcom,sar2130p-gpucc.h b/include/dt-bindings/clock/qcom,sar2130p-gpucc.h new file mode 100644 index 000000000000..a2204369110a --- /dev/null +++ b/include/dt-bindings/clock/qcom,sar2130p-gpucc.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. 
All rights reserved + * Copyright (c) 2024, Linaro Limited + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SAR2130P_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SAR2130P_H + +/* GPU_CC clocks */ +#define GPU_CC_AHB_CLK 0 +#define GPU_CC_CRC_AHB_CLK 1 +#define GPU_CC_CX_FF_CLK 2 +#define GPU_CC_CX_GMU_CLK 3 +#define GPU_CC_CXO_AON_CLK 4 +#define GPU_CC_CXO_CLK 5 +#define GPU_CC_FF_CLK_SRC 6 +#define GPU_CC_GMU_CLK_SRC 7 +#define GPU_CC_GX_GMU_CLK 8 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 9 +#define GPU_CC_HUB_AON_CLK 10 +#define GPU_CC_HUB_CLK_SRC 11 +#define GPU_CC_HUB_CX_INT_CLK 12 +#define GPU_CC_MEMNOC_GFX_CLK 13 +#define GPU_CC_PLL0 14 +#define GPU_CC_PLL1 15 +#define GPU_CC_SLEEP_CLK 16 + +/* GDSCs */ +#define GPU_GX_GDSC 0 +#define GPU_CX_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h index 8281e9caf3a9..311521fe4b59 100644 --- a/include/dt-bindings/clock/r9a08g045-cpg.h +++ b/include/dt-bindings/clock/r9a08g045-cpg.h @@ -308,5 +308,6 @@ #define R9A08G045_PD_DDR 64 #define R9A08G045_PD_TZCDDR 65 #define R9A08G045_PD_OTFDE_DDR 66 +#define R9A08G045_PD_RTC 67 #endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */ diff --git a/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h new file mode 100644 index 000000000000..67774eafad06 --- /dev/null +++ b/include/dt-bindings/clock/renesas,r9a08g045-vbattb.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2024 Renesas Electronics Corp. + */ +#ifndef __DT_BINDINGS_CLOCK_R9A08G045_VBATTB_H__ +#define __DT_BINDINGS_CLOCK_R9A08G045_VBATTB_H__ + +#define VBATTB_XC 0 +#define VBATTB_XBYP 1 +#define VBATTB_MUX 2 +#define VBATTB_VBATTCLK 3 + +#endif /* __DT_BINDINGS_CLOCK_R9A08G045_VBATTB_H__ */ diff --git a/include/dt-bindings/clock/samsung,exynos8895.h b/include/dt-bindings/clock/samsung,exynos8895.h new file mode 100644 index 000000000000..27998c53f929 --- /dev/null +++ b/include/dt-bindings/clock/samsung,exynos8895.h @@ -0,0 +1,453 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (C) 2024 Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com> + * Author: Ivaylo Ivanov <ivo.ivanov.ivanov1@gmail.com> + * + * Device Tree binding constants for Exynos8895 clock controller. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_EXYNOS8895_H +#define _DT_BINDINGS_CLOCK_EXYNOS8895_H + +/* CMU_TOP */ +#define CLK_FOUT_SHARED0_PLL 1 +#define CLK_FOUT_SHARED1_PLL 2 +#define CLK_FOUT_SHARED2_PLL 3 +#define CLK_FOUT_SHARED3_PLL 4 +#define CLK_FOUT_SHARED4_PLL 5 +#define CLK_MOUT_PLL_SHARED0 6 +#define CLK_MOUT_PLL_SHARED1 7 +#define CLK_MOUT_PLL_SHARED2 8 +#define CLK_MOUT_PLL_SHARED3 9 +#define CLK_MOUT_PLL_SHARED4 10 +#define CLK_MOUT_CP2AP_MIF_CLK_USER 11 +#define CLK_MOUT_CMU_ABOX_CPUABOX 12 +#define CLK_MOUT_CMU_APM_BUS 13 +#define CLK_MOUT_CMU_BUS1_BUS 14 +#define CLK_MOUT_CMU_BUSC_BUS 15 +#define CLK_MOUT_CMU_BUSC_BUSPHSI2C 16 +#define CLK_MOUT_CMU_CAM_BUS 17 +#define CLK_MOUT_CMU_CAM_TPU0 18 +#define CLK_MOUT_CMU_CAM_TPU1 19 +#define CLK_MOUT_CMU_CAM_VRA 20 +#define CLK_MOUT_CMU_CIS_CLK0 21 +#define CLK_MOUT_CMU_CIS_CLK1 22 +#define CLK_MOUT_CMU_CIS_CLK2 23 +#define CLK_MOUT_CMU_CIS_CLK3 24 +#define CLK_MOUT_CMU_CORE_BUS 25 +#define CLK_MOUT_CMU_CPUCL0_SWITCH 26 +#define CLK_MOUT_CMU_CPUCL1_SWITCH 27 +#define CLK_MOUT_CMU_DBG_BUS 28 +#define CLK_MOUT_CMU_DCAM_BUS 29 +#define CLK_MOUT_CMU_DCAM_IMGD 30 +#define CLK_MOUT_CMU_DPU_BUS 31 +#define CLK_MOUT_CMU_DROOPDETECTOR 32 +#define CLK_MOUT_CMU_DSP_BUS 33 +#define CLK_MOUT_CMU_FSYS0_BUS 34 +#define CLK_MOUT_CMU_FSYS0_DPGTC 35 +#define CLK_MOUT_CMU_FSYS0_MMC_EMBD 36 +#define CLK_MOUT_CMU_FSYS0_UFS_EMBD 37 +#define CLK_MOUT_CMU_FSYS0_USBDRD30 38 +#define CLK_MOUT_CMU_FSYS1_BUS 39 +#define CLK_MOUT_CMU_FSYS1_MMC_CARD 40 +#define CLK_MOUT_CMU_FSYS1_PCIE 41 +#define CLK_MOUT_CMU_FSYS1_UFS_CARD 42 +#define CLK_MOUT_CMU_G2D_G2D 43 +#define CLK_MOUT_CMU_G2D_JPEG 44 +#define CLK_MOUT_CMU_HPM 45 +#define CLK_MOUT_CMU_IMEM_BUS 46 +#define CLK_MOUT_CMU_ISPHQ_BUS 47 +#define CLK_MOUT_CMU_ISPLP_BUS 48 +#define CLK_MOUT_CMU_IVA_BUS 49 +#define CLK_MOUT_CMU_MFC_BUS 50 +#define CLK_MOUT_CMU_MIF_SWITCH 51 +#define CLK_MOUT_CMU_PERIC0_BUS 52 +#define CLK_MOUT_CMU_PERIC0_UART_DBG 53 +#define CLK_MOUT_CMU_PERIC0_USI00 54 +#define CLK_MOUT_CMU_PERIC0_USI01 55 +#define CLK_MOUT_CMU_PERIC0_USI02 56 +#define CLK_MOUT_CMU_PERIC0_USI03 57 +#define CLK_MOUT_CMU_PERIC1_BUS 58 +#define CLK_MOUT_CMU_PERIC1_SPEEDY2 59 +#define CLK_MOUT_CMU_PERIC1_SPI_CAM0 60 +#define CLK_MOUT_CMU_PERIC1_SPI_CAM1 61 +#define CLK_MOUT_CMU_PERIC1_UART_BT 62 +#define CLK_MOUT_CMU_PERIC1_USI04 63 +#define CLK_MOUT_CMU_PERIC1_USI05 64 +#define CLK_MOUT_CMU_PERIC1_USI06 65 +#define CLK_MOUT_CMU_PERIC1_USI07 66 +#define CLK_MOUT_CMU_PERIC1_USI08 67 +#define CLK_MOUT_CMU_PERIC1_USI09 68 +#define CLK_MOUT_CMU_PERIC1_USI10 69 +#define CLK_MOUT_CMU_PERIC1_USI11 70 +#define CLK_MOUT_CMU_PERIC1_USI12 71 +#define CLK_MOUT_CMU_PERIC1_USI13 72 +#define CLK_MOUT_CMU_PERIS_BUS 73 +#define CLK_MOUT_CMU_SRDZ_BUS 74 +#define CLK_MOUT_CMU_SRDZ_IMGD 75 +#define CLK_MOUT_CMU_VPU_BUS 76 +#define CLK_DOUT_CMU_ABOX_CPUABOX 77 +#define CLK_DOUT_CMU_APM_BUS 78 +#define CLK_DOUT_CMU_BUS1_BUS 79 +#define CLK_DOUT_CMU_BUSC_BUS 80 +#define CLK_DOUT_CMU_BUSC_BUSPHSI2C 81 +#define CLK_DOUT_CMU_CAM_BUS 82 +#define CLK_DOUT_CMU_CAM_TPU0 83 +#define CLK_DOUT_CMU_CAM_TPU1 84 +#define CLK_DOUT_CMU_CAM_VRA 85 +#define CLK_DOUT_CMU_CIS_CLK0 86 +#define CLK_DOUT_CMU_CIS_CLK1 87 +#define CLK_DOUT_CMU_CIS_CLK2 88 +#define CLK_DOUT_CMU_CIS_CLK3 89 +#define CLK_DOUT_CMU_CORE_BUS 90 +#define CLK_DOUT_CMU_CPUCL0_SWITCH 91 +#define CLK_DOUT_CMU_CPUCL1_SWITCH 92 +#define CLK_DOUT_CMU_DBG_BUS 93 +#define CLK_DOUT_CMU_DCAM_BUS 94 +#define CLK_DOUT_CMU_DCAM_IMGD 95 +#define CLK_DOUT_CMU_DPU_BUS 96 +#define CLK_DOUT_CMU_DSP_BUS 97 
+#define CLK_DOUT_CMU_FSYS0_BUS 98 +#define CLK_DOUT_CMU_FSYS0_DPGTC 99 +#define CLK_DOUT_CMU_FSYS0_MMC_EMBD 100 +#define CLK_DOUT_CMU_FSYS0_UFS_EMBD 101 +#define CLK_DOUT_CMU_FSYS0_USBDRD30 102 +#define CLK_DOUT_CMU_FSYS1_BUS 103 +#define CLK_DOUT_CMU_FSYS1_MMC_CARD 104 +#define CLK_DOUT_CMU_FSYS1_UFS_CARD 105 +#define CLK_DOUT_CMU_G2D_G2D 106 +#define CLK_DOUT_CMU_G2D_JPEG 107 +#define CLK_DOUT_CMU_G3D_SWITCH 108 +#define CLK_DOUT_CMU_HPM 109 +#define CLK_DOUT_CMU_IMEM_BUS 110 +#define CLK_DOUT_CMU_ISPHQ_BUS 111 +#define CLK_DOUT_CMU_ISPLP_BUS 112 +#define CLK_DOUT_CMU_IVA_BUS 113 +#define CLK_DOUT_CMU_MFC_BUS 114 +#define CLK_DOUT_CMU_MODEM_SHARED0 115 +#define CLK_DOUT_CMU_MODEM_SHARED1 116 +#define CLK_DOUT_CMU_PERIC0_BUS 117 +#define CLK_DOUT_CMU_PERIC0_UART_DBG 118 +#define CLK_DOUT_CMU_PERIC0_USI00 119 +#define CLK_DOUT_CMU_PERIC0_USI01 120 +#define CLK_DOUT_CMU_PERIC0_USI02 121 +#define CLK_DOUT_CMU_PERIC0_USI03 122 +#define CLK_DOUT_CMU_PERIC1_BUS 123 +#define CLK_DOUT_CMU_PERIC1_SPEEDY2 124 +#define CLK_DOUT_CMU_PERIC1_SPI_CAM0 125 +#define CLK_DOUT_CMU_PERIC1_SPI_CAM1 126 +#define CLK_DOUT_CMU_PERIC1_UART_BT 127 +#define CLK_DOUT_CMU_PERIC1_USI04 128 +#define CLK_DOUT_CMU_PERIC1_USI05 129 +#define CLK_DOUT_CMU_PERIC1_USI06 130 +#define CLK_DOUT_CMU_PERIC1_USI07 131 +#define CLK_DOUT_CMU_PERIC1_USI08 132 +#define CLK_DOUT_CMU_PERIC1_USI09 133 +#define CLK_DOUT_CMU_PERIC1_USI10 134 +#define CLK_DOUT_CMU_PERIC1_USI11 135 +#define CLK_DOUT_CMU_PERIC1_USI12 136 +#define CLK_DOUT_CMU_PERIC1_USI13 137 +#define CLK_DOUT_CMU_PERIS_BUS 138 +#define CLK_DOUT_CMU_SRDZ_BUS 139 +#define CLK_DOUT_CMU_SRDZ_IMGD 140 +#define CLK_DOUT_CMU_VPU_BUS 141 +#define CLK_DOUT_CMU_SHARED0_DIV2 142 +#define CLK_DOUT_CMU_SHARED0_DIV4 143 +#define CLK_DOUT_CMU_SHARED1_DIV2 144 +#define CLK_DOUT_CMU_SHARED1_DIV4 145 +#define CLK_DOUT_CMU_SHARED2_DIV2 146 +#define CLK_DOUT_CMU_SHARED3_DIV2 147 +#define CLK_DOUT_CMU_SHARED4_DIV2 148 +#define CLK_DOUT_CMU_FSYS1_PCIE 149 +#define CLK_DOUT_CMU_CP2AP_MIF_CLK_DIV2 150 +#define CLK_DOUT_CMU_CMU_OTP 151 +#define CLK_GOUT_CMU_DROOPDETECTOR 152 +#define CLK_GOUT_CMU_MIF_SWITCH 153 +#define CLK_GOUT_CMU_ABOX_CPUABOX 154 +#define CLK_GOUT_CMU_APM_BUS 155 +#define CLK_GOUT_CMU_BUS1_BUS 156 +#define CLK_GOUT_CMU_BUSC_BUS 157 +#define CLK_GOUT_CMU_BUSC_BUSPHSI2C 158 +#define CLK_GOUT_CMU_CAM_BUS 159 +#define CLK_GOUT_CMU_CAM_TPU0 160 +#define CLK_GOUT_CMU_CAM_TPU1 161 +#define CLK_GOUT_CMU_CAM_VRA 162 +#define CLK_GOUT_CMU_CIS_CLK0 163 +#define CLK_GOUT_CMU_CIS_CLK1 164 +#define CLK_GOUT_CMU_CIS_CLK2 165 +#define CLK_GOUT_CMU_CIS_CLK3 166 +#define CLK_GOUT_CMU_CORE_BUS 167 +#define CLK_GOUT_CMU_CPUCL0_SWITCH 168 +#define CLK_GOUT_CMU_CPUCL1_SWITCH 169 +#define CLK_GOUT_CMU_DBG_BUS 170 +#define CLK_GOUT_CMU_DCAM_BUS 171 +#define CLK_GOUT_CMU_DCAM_IMGD 172 +#define CLK_GOUT_CMU_DPU_BUS 173 +#define CLK_GOUT_CMU_DSP_BUS 174 +#define CLK_GOUT_CMU_FSYS0_BUS 175 +#define CLK_GOUT_CMU_FSYS0_DPGTC 176 +#define CLK_GOUT_CMU_FSYS0_MMC_EMBD 177 +#define CLK_GOUT_CMU_FSYS0_UFS_EMBD 178 +#define CLK_GOUT_CMU_FSYS0_USBDRD30 179 +#define CLK_GOUT_CMU_FSYS1_BUS 180 +#define CLK_GOUT_CMU_FSYS1_MMC_CARD 181 +#define CLK_GOUT_CMU_FSYS1_PCIE 182 +#define CLK_GOUT_CMU_FSYS1_UFS_CARD 183 +#define CLK_GOUT_CMU_G2D_G2D 184 +#define CLK_GOUT_CMU_G2D_JPEG 185 +#define CLK_GOUT_CMU_G3D_SWITCH 186 +#define CLK_GOUT_CMU_HPM 187 +#define CLK_GOUT_CMU_IMEM_BUS 188 +#define CLK_GOUT_CMU_ISPHQ_BUS 189 +#define CLK_GOUT_CMU_ISPLP_BUS 190 +#define CLK_GOUT_CMU_IVA_BUS 191 +#define CLK_GOUT_CMU_MFC_BUS 192 
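These CMU_TOP gate outputs feed the peripheral CMUs. A sketch of the wiring — the register address, clock-names, and parent phandles are my assumptions, and the compatible of the form samsung,exynos8895-cmu-* is assumed to match this series; only the index macros are taken from this header:

#include <dt-bindings/clock/samsung,exynos8895.h>

cmu_fsys0: clock-controller@11000000 {
	/* Sketch: reg, clocks and clock-names are placeholders */
	compatible = "samsung,exynos8895-cmu-fsys0";
	reg = <0x11000000 0x8000>;
	#clock-cells = <1>;
	clocks = <&oscclk>,
		 <&cmu_top CLK_GOUT_CMU_FSYS0_BUS>,
		 <&cmu_top CLK_GOUT_CMU_FSYS0_MMC_EMBD>;
	clock-names = "oscclk", "bus", "mmc";
};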
+#define CLK_GOUT_CMU_MODEM_SHARED0 193 +#define CLK_GOUT_CMU_MODEM_SHARED1 194 +#define CLK_GOUT_CMU_PERIC0_BUS 195 +#define CLK_GOUT_CMU_PERIC0_UART_DBG 196 +#define CLK_GOUT_CMU_PERIC0_USI00 197 +#define CLK_GOUT_CMU_PERIC0_USI01 198 +#define CLK_GOUT_CMU_PERIC0_USI02 199 +#define CLK_GOUT_CMU_PERIC0_USI03 200 +#define CLK_GOUT_CMU_PERIC1_BUS 201 +#define CLK_GOUT_CMU_PERIC1_SPEEDY2 202 +#define CLK_GOUT_CMU_PERIC1_SPI_CAM0 203 +#define CLK_GOUT_CMU_PERIC1_SPI_CAM1 204 +#define CLK_GOUT_CMU_PERIC1_UART_BT 205 +#define CLK_GOUT_CMU_PERIC1_USI04 206 +#define CLK_GOUT_CMU_PERIC1_USI05 207 +#define CLK_GOUT_CMU_PERIC1_USI06 208 +#define CLK_GOUT_CMU_PERIC1_USI07 209 +#define CLK_GOUT_CMU_PERIC1_USI08 210 +#define CLK_GOUT_CMU_PERIC1_USI09 211 +#define CLK_GOUT_CMU_PERIC1_USI10 212 +#define CLK_GOUT_CMU_PERIC1_USI11 213 +#define CLK_GOUT_CMU_PERIC1_USI12 214 +#define CLK_GOUT_CMU_PERIC1_USI13 215 +#define CLK_GOUT_CMU_PERIS_BUS 216 +#define CLK_GOUT_CMU_SRDZ_BUS 217 +#define CLK_GOUT_CMU_SRDZ_IMGD 218 +#define CLK_GOUT_CMU_VPU_BUS 219 + +/* CMU_PERIS */ +#define CLK_MOUT_PERIS_BUS_USER 1 +#define CLK_MOUT_PERIS_GIC 2 +#define CLK_GOUT_PERIS_CMU_PERIS_PCLK 3 +#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKM 4 +#define CLK_GOUT_PERIS_AD_AXI_P_PERIS_ACLKS 5 +#define CLK_GOUT_PERIS_AXI2APB_PERISP0_ACLK 6 +#define CLK_GOUT_PERIS_AXI2APB_PERISP1_ACLK 7 +#define CLK_GOUT_PERIS_BUSIF_TMU_PCLK 8 +#define CLK_GOUT_PERIS_GIC_CLK 9 +#define CLK_GOUT_PERIS_LHM_AXI_P_PERIS_I_CLK 10 +#define CLK_GOUT_PERIS_MCT_PCLK 11 +#define CLK_GOUT_PERIS_OTP_CON_BIRA_PCLK 12 +#define CLK_GOUT_PERIS_OTP_CON_TOP_PCLK 13 +#define CLK_GOUT_PERIS_PMU_PERIS_PCLK 14 +#define CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_BUSP_CLK 15 +#define CLK_GOUT_PERIS_RSTNSYNC_CLK_PERIS_GIC_CLK 16 +#define CLK_GOUT_PERIS_SYSREG_PERIS_PCLK 17 +#define CLK_GOUT_PERIS_TZPC00_PCLK 18 +#define CLK_GOUT_PERIS_TZPC01_PCLK 19 +#define CLK_GOUT_PERIS_TZPC02_PCLK 20 +#define CLK_GOUT_PERIS_TZPC03_PCLK 21 +#define CLK_GOUT_PERIS_TZPC04_PCLK 22 +#define CLK_GOUT_PERIS_TZPC05_PCLK 23 +#define CLK_GOUT_PERIS_TZPC06_PCLK 24 +#define CLK_GOUT_PERIS_TZPC07_PCLK 25 +#define CLK_GOUT_PERIS_TZPC08_PCLK 26 +#define CLK_GOUT_PERIS_TZPC09_PCLK 27 +#define CLK_GOUT_PERIS_TZPC10_PCLK 28 +#define CLK_GOUT_PERIS_TZPC11_PCLK 29 +#define CLK_GOUT_PERIS_TZPC12_PCLK 30 +#define CLK_GOUT_PERIS_TZPC13_PCLK 31 +#define CLK_GOUT_PERIS_TZPC14_PCLK 32 +#define CLK_GOUT_PERIS_TZPC15_PCLK 33 +#define CLK_GOUT_PERIS_WDT_CLUSTER0_PCLK 34 +#define CLK_GOUT_PERIS_WDT_CLUSTER1_PCLK 35 +#define CLK_GOUT_PERIS_XIU_P_PERIS_ACLK 36 + +/* CMU_FSYS0 */ +#define CLK_MOUT_FSYS0_BUS_USER 1 +#define CLK_MOUT_FSYS0_DPGTC_USER 2 +#define CLK_MOUT_FSYS0_MMC_EMBD_USER 3 +#define CLK_MOUT_FSYS0_UFS_EMBD_USER 4 +#define CLK_MOUT_FSYS0_USBDRD30_USER 5 +#define CLK_GOUT_FSYS0_FSYS0_CMU_FSYS0_PCLK 6 +#define CLK_GOUT_FSYS0_AHBBR_FSYS0_HCLK 7 +#define CLK_GOUT_FSYS0_AXI2AHB_FSYS0_ACLK 8 +#define CLK_GOUT_FSYS0_AXI2AHB_USB_FSYS0_ACLK 9 +#define CLK_GOUT_FSYS0_AXI2APB_FSYS0_ACLK 10 +#define CLK_GOUT_FSYS0_BTM_FSYS0_I_ACLK 11 +#define CLK_GOUT_FSYS0_BTM_FSYS0_I_PCLK 12 +#define CLK_GOUT_FSYS0_DP_LINK_I_GTC_EXT_CLK 13 +#define CLK_GOUT_FSYS0_DP_LINK_I_PCLK 14 +#define CLK_GOUT_FSYS0_ETR_MIU_I_ACLK 15 +#define CLK_GOUT_FSYS0_ETR_MIU_I_PCLK 16 +#define CLK_GOUT_FSYS0_GPIO_FSYS0_PCLK 17 +#define CLK_GOUT_FSYS0_LHM_AXI_D_USBTV_I_CLK 18 +#define CLK_GOUT_FSYS0_LHM_AXI_G_ETR_I_CLK 19 +#define CLK_GOUT_FSYS0_LHM_AXI_P_FSYS0_I_CLK 20 +#define CLK_GOUT_FSYS0_LHS_ACEL_D_FSYS0_I_CLK 21 +#define CLK_GOUT_FSYS0_MMC_EMBD_I_ACLK 22 
+#define CLK_GOUT_FSYS0_MMC_EMBD_SDCLKIN 23 +#define CLK_GOUT_FSYS0_PMU_FSYS0_PCLK 24 +#define CLK_GOUT_FSYS0_BCM_FSYS0_ACLK 25 +#define CLK_GOUT_FSYS0_BCM_FSYS0_PCLK 26 +#define CLK_GOUT_FSYS0_RSTNSYNC_CLK_FSYS0_BUS_CLK 27 +#define CLK_GOUT_FSYS0_SYSREG_FSYS0_PCLK 28 +#define CLK_GOUT_FSYS0_UFS_EMBD_I_ACLK 29 +#define CLK_GOUT_FSYS0_UFS_EMBD_I_CLK_UNIPRO 30 +#define CLK_GOUT_FSYS0_UFS_EMBD_I_FMP_CLK 31 +#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_ACLK 32 +#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_REF_CLK 33 +#define CLK_GOUT_FSYS0_USBTV_I_USB30DRD_SUSPEND_CLK 34 +#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_AHB_CLK 35 +#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_CORE_CLK 36 +#define CLK_GOUT_FSYS0_USBTV_I_USBTVH_XIU_CLK 37 +#define CLK_GOUT_FSYS0_US_D_FSYS0_USB_ACLK 38 +#define CLK_GOUT_FSYS0_XIU_D_FSYS0_ACLK 39 +#define CLK_GOUT_FSYS0_XIU_D_FSYS0_USB_ACLK 40 +#define CLK_GOUT_FSYS0_XIU_P_FSYS0_ACLK 41 + +/* CMU_FSYS1 */ +#define CLK_MOUT_FSYS1_BUS_USER 1 +#define CLK_MOUT_FSYS1_MMC_CARD_USER 2 +#define CLK_MOUT_FSYS1_PCIE_USER 3 +#define CLK_MOUT_FSYS1_UFS_CARD_USER 4 +#define CLK_GOUT_FSYS1_PCIE_PHY_REF_CLK_IN 5 +#define CLK_GOUT_FSYS1_ADM_AHB_SSS_HCLKM 6 +#define CLK_GOUT_FSYS1_AHBBR_FSYS1_HCLK 7 +#define CLK_GOUT_FSYS1_AXI2AHB_FSYS1_ACLK 8 +#define CLK_GOUT_FSYS1_AXI2APB_FSYS1P0_ACLK 9 +#define CLK_GOUT_FSYS1_AXI2APB_FSYS1P1_ACLK 10 +#define CLK_GOUT_FSYS1_BTM_FSYS1_I_ACLK 11 +#define CLK_GOUT_FSYS1_BTM_FSYS1_I_PCLK 12 +#define CLK_GOUT_FSYS1_FSYS1_CMU_FSYS1_PCLK 13 +#define CLK_GOUT_FSYS1_GPIO_FSYS1_PCLK 14 +#define CLK_GOUT_FSYS1_LHM_AXI_P_FSYS1_I_CLK 15 +#define CLK_GOUT_FSYS1_LHS_ACEL_D_FSYS1_I_CLK 16 +#define CLK_GOUT_FSYS1_MMC_CARD_I_ACLK 17 +#define CLK_GOUT_FSYS1_MMC_CARD_SDCLKIN 18 +#define CLK_GOUT_FSYS1_PCIE_DBI_ACLK_0 19 +#define CLK_GOUT_FSYS1_PCIE_DBI_ACLK_1 20 +#define CLK_GOUT_FSYS1_PCIE_IEEE1500_WRAPPER_FOR_PCIE_PHY_LC_X2_INST_0_I_SCL_APB_PCLK 21 +#define CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_0 22 +#define CLK_GOUT_FSYS1_PCIE_MSTR_ACLK_1 23 +#define CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_0_I_DRIVER_APB_CLK 24 +#define CLK_GOUT_FSYS1_PCIE_PCIE_SUB_CTRL_INST_1_I_DRIVER_APB_CLK 25 +#define CLK_GOUT_FSYS1_PCIE_PIPE2_DIGITAL_X2_WRAP_INST_0_I_APB_PCLK_SCL 26 +#define CLK_GOUT_FSYS1_PCIE_SLV_ACLK_0 27 +#define CLK_GOUT_FSYS1_PCIE_SLV_ACLK_1 28 +#define CLK_GOUT_FSYS1_PMU_FSYS1_PCLK 29 +#define CLK_GOUT_FSYS1_BCM_FSYS1_ACLK 30 +#define CLK_GOUT_FSYS1_BCM_FSYS1_PCLK 31 +#define CLK_GOUT_FSYS1_RSTNSYNC_CLK_FSYS1_BUS_CLK 32 +#define CLK_GOUT_FSYS1_RTIC_I_ACLK 33 +#define CLK_GOUT_FSYS1_RTIC_I_PCLK 34 +#define CLK_GOUT_FSYS1_SSS_I_ACLK 35 +#define CLK_GOUT_FSYS1_SSS_I_PCLK 36 +#define CLK_GOUT_FSYS1_SYSREG_FSYS1_PCLK 37 +#define CLK_GOUT_FSYS1_TOE_WIFI0_I_CLK 38 +#define CLK_GOUT_FSYS1_TOE_WIFI1_I_CLK 39 +#define CLK_GOUT_FSYS1_UFS_CARD_I_ACLK 40 +#define CLK_GOUT_FSYS1_UFS_CARD_I_CLK_UNIPRO 41 +#define CLK_GOUT_FSYS1_UFS_CARD_I_FMP_CLK 42 +#define CLK_GOUT_FSYS1_XIU_D_FSYS1_ACLK 43 +#define CLK_GOUT_FSYS1_XIU_P_FSYS1_ACLK 44 + +/* CMU_PERIC0 */ +#define CLK_MOUT_PERIC0_BUS_USER 1 +#define CLK_MOUT_PERIC0_UART_DBG_USER 2 +#define CLK_MOUT_PERIC0_USI00_USER 3 +#define CLK_MOUT_PERIC0_USI01_USER 4 +#define CLK_MOUT_PERIC0_USI02_USER 5 +#define CLK_MOUT_PERIC0_USI03_USER 6 +#define CLK_GOUT_PERIC0_PERIC0_CMU_PERIC0_PCLK 7 +#define CLK_GOUT_PERIC0_AXI2APB_PERIC0_ACLK 8 +#define CLK_GOUT_PERIC0_GPIO_PERIC0_PCLK 9 +#define CLK_GOUT_PERIC0_LHM_AXI_P_PERIC0_I_CLK 10 +#define CLK_GOUT_PERIC0_PMU_PERIC0_PCLK 11 +#define CLK_GOUT_PERIC0_PWM_I_PCLK_S0 12 +#define CLK_GOUT_PERIC0_RSTNSYNC_CLK_PERIC0_BUSP_CLK 13 
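The CMU_PERIC0 gates map onto individual peripheral interface clocks. A hypothetical consumer sketch — everything except the index macro is a placeholder (samsung,exynos4210-pwm is an existing compatible, but whether Exynos8895 boards use it here is my assumption):

#include <dt-bindings/clock/samsung,exynos8895.h>

pwm@10510000 {
	/* Sketch: compatible, reg and #pwm-cells are placeholders */
	compatible = "samsung,exynos4210-pwm";
	reg = <0x10510000 0x100>;
	clocks = <&cmu_peric0 CLK_GOUT_PERIC0_PWM_I_PCLK_S0>;
	clock-names = "timers";
	#pwm-cells = <3>;
};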
+#define CLK_GOUT_PERIC0_SPEEDY2_TSP_CLK 14 +#define CLK_GOUT_PERIC0_SYSREG_PERIC0_PCLK 15 +#define CLK_GOUT_PERIC0_UART_DBG_EXT_UCLK 16 +#define CLK_GOUT_PERIC0_UART_DBG_PCLK 17 +#define CLK_GOUT_PERIC0_USI00_I_PCLK 18 +#define CLK_GOUT_PERIC0_USI00_I_SCLK_USI 19 +#define CLK_GOUT_PERIC0_USI01_I_PCLK 20 +#define CLK_GOUT_PERIC0_USI01_I_SCLK_USI 21 +#define CLK_GOUT_PERIC0_USI02_I_PCLK 22 +#define CLK_GOUT_PERIC0_USI02_I_SCLK_USI 23 +#define CLK_GOUT_PERIC0_USI03_I_PCLK 24 +#define CLK_GOUT_PERIC0_USI03_I_SCLK_USI 25 + +/* CMU_PERIC1 */ +#define CLK_MOUT_PERIC1_BUS_USER 1 +#define CLK_MOUT_PERIC1_SPEEDY2_USER 2 +#define CLK_MOUT_PERIC1_SPI_CAM0_USER 3 +#define CLK_MOUT_PERIC1_SPI_CAM1_USER 4 +#define CLK_MOUT_PERIC1_UART_BT_USER 5 +#define CLK_MOUT_PERIC1_USI04_USER 6 +#define CLK_MOUT_PERIC1_USI05_USER 7 +#define CLK_MOUT_PERIC1_USI06_USER 8 +#define CLK_MOUT_PERIC1_USI07_USER 9 +#define CLK_MOUT_PERIC1_USI08_USER 10 +#define CLK_MOUT_PERIC1_USI09_USER 11 +#define CLK_MOUT_PERIC1_USI10_USER 12 +#define CLK_MOUT_PERIC1_USI11_USER 13 +#define CLK_MOUT_PERIC1_USI12_USER 14 +#define CLK_MOUT_PERIC1_USI13_USER 15 +#define CLK_GOUT_PERIC1_PERIC1_CMU_PERIC1_PCLK 16 +#define CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_SPEEDY2_CLK 17 +#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P0_ACLK 18 +#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P1_ACLK 19 +#define CLK_GOUT_PERIC1_AXI2APB_PERIC1P2_ACLK 20 +#define CLK_GOUT_PERIC1_GPIO_PERIC1_PCLK 21 +#define CLK_GOUT_PERIC1_HSI2C_CAM0_IPCLK 22 +#define CLK_GOUT_PERIC1_HSI2C_CAM1_IPCLK 23 +#define CLK_GOUT_PERIC1_HSI2C_CAM2_IPCLK 24 +#define CLK_GOUT_PERIC1_HSI2C_CAM3_IPCLK 25 +#define CLK_GOUT_PERIC1_LHM_AXI_P_PERIC1_I_CLK 26 +#define CLK_GOUT_PERIC1_PMU_PERIC1_PCLK 27 +#define CLK_GOUT_PERIC1_RSTNSYNC_CLK_PERIC1_BUSP_CLK 28 +#define CLK_GOUT_PERIC1_SPEEDY2_DDI1_CLK 29 +#define CLK_GOUT_PERIC1_SPEEDY2_DDI1_SCLK 30 +#define CLK_GOUT_PERIC1_SPEEDY2_DDI2_CLK 31 +#define CLK_GOUT_PERIC1_SPEEDY2_DDI2_SCLK 32 +#define CLK_GOUT_PERIC1_SPEEDY2_DDI_CLK 33 +#define CLK_GOUT_PERIC1_SPEEDY2_DDI_SCLK 34 +#define CLK_GOUT_PERIC1_SPEEDY2_TSP1_CLK 35 +#define CLK_GOUT_PERIC1_SPEEDY2_TSP2_CLK 36 +#define CLK_GOUT_PERIC1_SPI_CAM0_PCLK 37 +#define CLK_GOUT_PERIC1_SPI_CAM0_SPI_EXT_CLK 38 +#define CLK_GOUT_PERIC1_SPI_CAM1_PCLK 39 +#define CLK_GOUT_PERIC1_SPI_CAM1_SPI_EXT_CLK 40 +#define CLK_GOUT_PERIC1_SYSREG_PERIC1_PCLK 41 +#define CLK_GOUT_PERIC1_UART_BT_EXT_UCLK 42 +#define CLK_GOUT_PERIC1_UART_BT_PCLK 43 +#define CLK_GOUT_PERIC1_USI04_I_PCLK 44 +#define CLK_GOUT_PERIC1_USI04_I_SCLK_USI 45 +#define CLK_GOUT_PERIC1_USI05_I_PCLK 46 +#define CLK_GOUT_PERIC1_USI05_I_SCLK_USI 47 +#define CLK_GOUT_PERIC1_USI06_I_PCLK 48 +#define CLK_GOUT_PERIC1_USI06_I_SCLK_USI 49 +#define CLK_GOUT_PERIC1_USI07_I_PCLK 50 +#define CLK_GOUT_PERIC1_USI07_I_SCLK_USI 51 +#define CLK_GOUT_PERIC1_USI08_I_PCLK 52 +#define CLK_GOUT_PERIC1_USI08_I_SCLK_USI 53 +#define CLK_GOUT_PERIC1_USI09_I_PCLK 54 +#define CLK_GOUT_PERIC1_USI09_I_SCLK_USI 55 +#define CLK_GOUT_PERIC1_USI10_I_PCLK 56 +#define CLK_GOUT_PERIC1_USI10_I_SCLK_USI 57 +#define CLK_GOUT_PERIC1_USI11_I_PCLK 58 +#define CLK_GOUT_PERIC1_USI11_I_SCLK_USI 59 +#define CLK_GOUT_PERIC1_USI12_I_PCLK 60 +#define CLK_GOUT_PERIC1_USI12_I_SCLK_USI 61 +#define CLK_GOUT_PERIC1_USI13_I_PCLK 62 +#define CLK_GOUT_PERIC1_USI13_I_SCLK_USI 63 +#define CLK_GOUT_PERIC1_XIU_P_PERIC1_ACLK 64 + +#endif /* _DT_BINDINGS_CLOCK_EXYNOS8895_H */ diff --git a/include/dt-bindings/clock/samsung,exynosautov920.h b/include/dt-bindings/clock/samsung,exynosautov920.h index c720f344b6bf..0c681f2ba3d0 100644 
--- a/include/dt-bindings/clock/samsung,exynosautov920.h +++ b/include/dt-bindings/clock/samsung,exynosautov920.h @@ -160,6 +160,7 @@ #define DOUT_CLKCMU_SNW_NOC 144 #define DOUT_CLKCMU_SSP_NOC 145 #define DOUT_CLKCMU_TAA_NOC 146 +#define DOUT_TCXO_DIV2 147 /* CMU_PERIC0 */ #define CLK_MOUT_PERIC0_IP_USER 1 @@ -188,4 +189,50 @@ #define CLK_DOUT_PERIC0_USI_I2C 23 #define CLK_DOUT_PERIC0_I3C 24 +/* CMU_PERIC1 */ +#define CLK_MOUT_PERIC1_IP_USER 1 +#define CLK_MOUT_PERIC1_NOC_USER 2 +#define CLK_MOUT_PERIC1_USI09_USI 3 +#define CLK_MOUT_PERIC1_USI10_USI 4 +#define CLK_MOUT_PERIC1_USI11_USI 5 +#define CLK_MOUT_PERIC1_USI12_USI 6 +#define CLK_MOUT_PERIC1_USI13_USI 7 +#define CLK_MOUT_PERIC1_USI14_USI 8 +#define CLK_MOUT_PERIC1_USI15_USI 9 +#define CLK_MOUT_PERIC1_USI16_USI 10 +#define CLK_MOUT_PERIC1_USI17_USI 11 +#define CLK_MOUT_PERIC1_USI_I2C 12 +#define CLK_MOUT_PERIC1_I3C 13 + +#define CLK_DOUT_PERIC1_USI09_USI 14 +#define CLK_DOUT_PERIC1_USI10_USI 15 +#define CLK_DOUT_PERIC1_USI11_USI 16 +#define CLK_DOUT_PERIC1_USI12_USI 17 +#define CLK_DOUT_PERIC1_USI13_USI 18 +#define CLK_DOUT_PERIC1_USI14_USI 19 +#define CLK_DOUT_PERIC1_USI15_USI 20 +#define CLK_DOUT_PERIC1_USI16_USI 21 +#define CLK_DOUT_PERIC1_USI17_USI 22 +#define CLK_DOUT_PERIC1_USI_I2C 23 +#define CLK_DOUT_PERIC1_I3C 24 + +/* CMU_MISC */ +#define CLK_MOUT_MISC_NOC_USER 1 +#define CLK_MOUT_MISC_GIC 2 + +#define CLK_DOUT_MISC_OTP 3 +#define CLK_DOUT_MISC_NOCP 4 +#define CLK_DOUT_MISC_OSC_DIV2 5 + +/* CMU_HSI0 */ +#define CLK_MOUT_HSI0_NOC_USER 1 + +#define CLK_DOUT_HSI0_PCIE_APB 2 + +/* CMU_HSI1 */ +#define CLK_MOUT_HSI1_MMC_CARD_USER 1 +#define CLK_MOUT_HSI1_NOC_USER 2 +#define CLK_MOUT_HSI1_USBDRD_USER 3 +#define CLK_MOUT_HSI1_USBDRD 4 + #endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H */ diff --git a/include/dt-bindings/iio/adc/gehc,pmc-adc.h b/include/dt-bindings/iio/adc/gehc,pmc-adc.h new file mode 100644 index 000000000000..2f291e3c76ae --- /dev/null +++ b/include/dt-bindings/iio/adc/gehc,pmc-adc.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ + +#ifndef _DT_BINDINGS_IIO_ADC_GEHC_PMC_ADC_H +#define _DT_BINDINGS_IIO_ADC_GEHC_PMC_ADC_H + +/* ADC channel type */ +#define GEHC_PMC_ADC_VOLTAGE 0 +#define GEHC_PMC_ADC_CURRENT 1 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,qcs615-rpmh.h b/include/dt-bindings/interconnect/qcom,qcs615-rpmh.h new file mode 100644 index 000000000000..84ae0d39e73c --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,qcs615-rpmh.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCS615_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_QCS615_H + +#define MASTER_A1NOC_CFG 1 +#define MASTER_QDSS_BAM 2 +#define MASTER_QSPI 3 +#define MASTER_QUP_0 4 +#define MASTER_BLSP_1 5 +#define MASTER_CNOC_A2NOC 6 +#define MASTER_CRYPTO 7 +#define MASTER_IPA 8 +#define MASTER_EMAC_EVB 9 +#define MASTER_PCIE 10 +#define MASTER_QDSS_ETR 11 +#define MASTER_SDCC_1 12 +#define MASTER_SDCC_2 13 +#define MASTER_UFS_MEM 14 +#define MASTER_USB2 15 +#define MASTER_USB3_0 16 +#define SLAVE_A1NOC_SNOC 17 +#define SLAVE_LPASS_SNOC 18 +#define SLAVE_ANOC_PCIE_SNOC 19 +#define SLAVE_SERVICE_A2NOC 20 + +#define MASTER_CAMNOC_HF0_UNCOMP 1 +#define MASTER_CAMNOC_HF1_UNCOMP 2 +#define MASTER_CAMNOC_SF_UNCOMP 3 +#define SLAVE_CAMNOC_UNCOMP 4 + +#define MASTER_SPDM 1 +#define MASTER_SNOC_CNOC 2 +#define MASTER_QDSS_DAP 3 +#define SLAVE_A1NOC_CFG 4 +#define SLAVE_AHB2PHY_EAST 5 +#define SLAVE_AHB2PHY_WEST 6 +#define SLAVE_AOP 7 +#define SLAVE_AOSS 8 +#define SLAVE_CAMERA_CFG 9 +#define SLAVE_CLK_CTL 10 +#define SLAVE_RBCPR_CX_CFG 11 +#define SLAVE_RBCPR_MX_CFG 12 +#define SLAVE_CRYPTO_0_CFG 13 +#define SLAVE_CNOC_DDRSS 14 +#define SLAVE_DISPLAY_CFG 15 +#define SLAVE_EMAC_AVB_CFG 16 +#define SLAVE_GLM 17 +#define SLAVE_GFX3D_CFG 18 +#define SLAVE_IMEM_CFG 19 +#define SLAVE_IPA_CFG 20 +#define SLAVE_CNOC_MNOC_CFG 21 +#define SLAVE_PCIE_CFG 22 +#define SLAVE_PIMEM_CFG 23 +#define SLAVE_PRNG 24 +#define SLAVE_QDSS_CFG 25 +#define SLAVE_QSPI 26 +#define SLAVE_QUP_0 27 +#define SLAVE_QUP_1 28 +#define SLAVE_SDCC_1 29 +#define SLAVE_SDCC_2 30 +#define SLAVE_SNOC_CFG 31 +#define SLAVE_SPDM_WRAPPER 32 +#define SLAVE_TCSR 33 +#define SLAVE_TLMM_EAST 34 +#define SLAVE_TLMM_SOUTH 35 +#define SLAVE_TLMM_WEST 36 +#define SLAVE_UFS_MEM_CFG 37 +#define SLAVE_USB2 38 +#define SLAVE_USB3 39 +#define SLAVE_VENUS_CFG 40 +#define SLAVE_VSENSE_CTRL_CFG 41 +#define SLAVE_CNOC_A2NOC 42 +#define SLAVE_SERVICE_CNOC 43 + +#define MASTER_CNOC_DC_NOC 1 +#define SLAVE_DC_NOC_GEMNOC 2 +#define SLAVE_LLCC_CFG 3 + +#define MASTER_APPSS_PROC 1 +#define MASTER_GPU_TCU 2 +#define MASTER_SYS_TCU 3 +#define MASTER_GEM_NOC_CFG 4 +#define MASTER_GFX3D 5 +#define MASTER_MNOC_HF_MEM_NOC 6 +#define MASTER_MNOC_SF_MEM_NOC 7 +#define MASTER_SNOC_GC_MEM_NOC 8 +#define MASTER_SNOC_SF_MEM_NOC 9 +#define SLAVE_MSS_PROC_MS_MPU_CFG 10 +#define SLAVE_GEM_NOC_SNOC 11 +#define SLAVE_LLCC 12 +#define SLAVE_MEM_NOC_PCIE_SNOC 13 +#define SLAVE_SERVICE_GEM_NOC 14 + +#define MASTER_IPA_CORE 1 +#define SLAVE_IPA_CORE 2 + +#define MASTER_LLCC 1 +#define SLAVE_EBI1 2 + +#define MASTER_CNOC_MNOC_CFG 1 +#define MASTER_CAMNOC_HF0 2 +#define MASTER_CAMNOC_HF1 3 +#define MASTER_CAMNOC_SF 4 +#define MASTER_MDP0 5 +#define MASTER_ROTATOR 6 +#define MASTER_VIDEO_P0 7 +#define MASTER_VIDEO_PROC 8 +#define SLAVE_MNOC_SF_MEM_NOC 9 +#define SLAVE_MNOC_HF_MEM_NOC 10 +#define SLAVE_SERVICE_MNOC 11 + +#define MASTER_SNOC_CFG 1 +#define MASTER_A1NOC_SNOC 2 +#define MASTER_GEM_NOC_SNOC 3 +#define MASTER_GEM_NOC_PCIE_SNOC 4 +#define MASTER_LPASS_ANOC 5 +#define MASTER_ANOC_PCIE_SNOC 6 +#define MASTER_PIMEM 7 +#define MASTER_GIC 8 +#define SLAVE_APPSS 9 +#define SLAVE_SNOC_CNOC 10 +#define SLAVE_SNOC_GEM_NOC_SF 11 +#define SLAVE_SNOC_MEM_NOC_GC 12 +#define SLAVE_IMEM 13 +#define SLAVE_PIMEM 14 +#define SLAVE_SERVICE_SNOC 15 +#define SLAVE_PCIE_0 16 +#define SLAVE_QDSS_STM 17 +#define SLAVE_TCU 18 + +#endif + diff --git a/include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h b/include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h new file mode 
100644 index 000000000000..c5eeafa1b1dd --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,qcs8300-rpmh.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCS8300_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_QCS8300_H + +#define MASTER_QUP_3 0 +#define MASTER_EMAC 1 +#define MASTER_SDC 2 +#define MASTER_UFS_MEM 3 +#define MASTER_USB2 4 +#define MASTER_USB3_0 5 +#define SLAVE_A1NOC_SNOC 6 + +#define MASTER_QDSS_BAM 0 +#define MASTER_QUP_0 1 +#define MASTER_QUP_1 2 +#define MASTER_CNOC_A2NOC 3 +#define MASTER_CRYPTO_CORE0 4 +#define MASTER_CRYPTO_CORE1 5 +#define MASTER_IPA 6 +#define MASTER_QDSS_ETR_0 7 +#define MASTER_QDSS_ETR_1 8 +#define SLAVE_A2NOC_SNOC 9 + +#define MASTER_QUP_CORE_0 0 +#define MASTER_QUP_CORE_1 1 +#define MASTER_QUP_CORE_3 2 +#define SLAVE_QUP_CORE_0 3 +#define SLAVE_QUP_CORE_1 4 +#define SLAVE_QUP_CORE_3 5 + +#define MASTER_GEM_NOC_CNOC 0 +#define MASTER_GEM_NOC_PCIE_SNOC 1 +#define SLAVE_AHB2PHY_2 2 +#define SLAVE_AHB2PHY_3 3 +#define SLAVE_ANOC_THROTTLE_CFG 4 +#define SLAVE_AOSS 5 +#define SLAVE_APPSS 6 +#define SLAVE_BOOT_ROM 7 +#define SLAVE_CAMERA_CFG 8 +#define SLAVE_CAMERA_NRT_THROTTLE_CFG 9 +#define SLAVE_CAMERA_RT_THROTTLE_CFG 10 +#define SLAVE_CLK_CTL 11 +#define SLAVE_CDSP_CFG 12 +#define SLAVE_RBCPR_CX_CFG 13 +#define SLAVE_RBCPR_MMCX_CFG 14 +#define SLAVE_RBCPR_MX_CFG 15 +#define SLAVE_CPR_NSPCX 16 +#define SLAVE_CPR_NSPHMX 17 +#define SLAVE_CRYPTO_0_CFG 18 +#define SLAVE_CX_RDPM 19 +#define SLAVE_DISPLAY_CFG 20 +#define SLAVE_DISPLAY_RT_THROTTLE_CFG 21 +#define SLAVE_EMAC_CFG 22 +#define SLAVE_GP_DSP0_CFG 23 +#define SLAVE_GPDSP0_THROTTLE_CFG 24 +#define SLAVE_GPU_TCU_THROTTLE_CFG 25 +#define SLAVE_GFX3D_CFG 26 +#define SLAVE_HWKM 27 +#define SLAVE_IMEM_CFG 28 +#define SLAVE_IPA_CFG 29 +#define SLAVE_IPC_ROUTER_CFG 30 +#define SLAVE_LPASS 31 +#define SLAVE_LPASS_THROTTLE_CFG 32 +#define SLAVE_MX_RDPM 33 +#define SLAVE_MXC_RDPM 34 +#define SLAVE_PCIE_0_CFG 35 +#define SLAVE_PCIE_1_CFG 36 +#define SLAVE_PCIE_TCU_THROTTLE_CFG 37 +#define SLAVE_PCIE_THROTTLE_CFG 38 +#define SLAVE_PDM 39 +#define SLAVE_PIMEM_CFG 40 +#define SLAVE_PKA_WRAPPER_CFG 41 +#define SLAVE_QDSS_CFG 42 +#define SLAVE_QM_CFG 43 +#define SLAVE_QM_MPU_CFG 44 +#define SLAVE_QUP_0 45 +#define SLAVE_QUP_1 46 +#define SLAVE_QUP_3 47 +#define SLAVE_SAIL_THROTTLE_CFG 48 +#define SLAVE_SDC1 49 +#define SLAVE_SECURITY 50 +#define SLAVE_SNOC_THROTTLE_CFG 51 +#define SLAVE_TCSR 52 +#define SLAVE_TLMM 53 +#define SLAVE_TSC_CFG 54 +#define SLAVE_UFS_MEM_CFG 55 +#define SLAVE_USB2 56 +#define SLAVE_USB3_0 57 +#define SLAVE_VENUS_CFG 58 +#define SLAVE_VENUS_CVP_THROTTLE_CFG 59 +#define SLAVE_VENUS_V_CPU_THROTTLE_CFG 60 +#define SLAVE_VENUS_VCODEC_THROTTLE_CFG 61 +#define SLAVE_DDRSS_CFG 62 +#define SLAVE_GPDSP_NOC_CFG 63 +#define SLAVE_CNOC_MNOC_HF_CFG 64 +#define SLAVE_CNOC_MNOC_SF_CFG 65 +#define SLAVE_PCIE_ANOC_CFG 66 +#define SLAVE_SNOC_CFG 67 +#define SLAVE_BOOT_IMEM 68 +#define SLAVE_IMEM 69 +#define SLAVE_PIMEM 70 +#define SLAVE_PCIE_0 71 +#define SLAVE_PCIE_1 72 +#define SLAVE_QDSS_STM 73 +#define SLAVE_TCU 74 + +#define MASTER_CNOC_DC_NOC 0 +#define SLAVE_LLCC_CFG 1 +#define SLAVE_GEM_NOC_CFG 2 + +#define MASTER_GPU_TCU 0 +#define MASTER_PCIE_TCU 1 +#define MASTER_SYS_TCU 2 +#define MASTER_APPSS_PROC 3 +#define MASTER_COMPUTE_NOC 4 +#define MASTER_GEM_NOC_CFG 5 +#define MASTER_GPDSP_SAIL 6 +#define MASTER_GFX3D 7 +#define 
MASTER_MNOC_HF_MEM_NOC 8 +#define MASTER_MNOC_SF_MEM_NOC 9 +#define MASTER_ANOC_PCIE_GEM_NOC 10 +#define MASTER_SNOC_GC_MEM_NOC 11 +#define MASTER_SNOC_SF_MEM_NOC 12 +#define SLAVE_GEM_NOC_CNOC 13 +#define SLAVE_LLCC 14 +#define SLAVE_GEM_NOC_PCIE_CNOC 15 +#define SLAVE_SERVICE_GEM_NOC_1 16 +#define SLAVE_SERVICE_GEM_NOC_2 17 +#define SLAVE_SERVICE_GEM_NOC 18 +#define SLAVE_SERVICE_GEM_NOC2 19 + +#define MASTER_SAILSS_MD0 0 +#define MASTER_DSP0 1 +#define SLAVE_GP_DSP_SAIL_NOC 2 + +#define MASTER_CNOC_LPASS_AG_NOC 0 +#define MASTER_LPASS_PROC 1 +#define SLAVE_LPASS_CORE_CFG 2 +#define SLAVE_LPASS_LPI_CFG 3 +#define SLAVE_LPASS_MPU_CFG 4 +#define SLAVE_LPASS_TOP_CFG 5 +#define SLAVE_LPASS_SNOC 6 +#define SLAVE_SERVICES_LPASS_AML_NOC 7 +#define SLAVE_SERVICE_LPASS_AG_NOC 8 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_CAMNOC_HF 0 +#define MASTER_CAMNOC_ICP 1 +#define MASTER_CAMNOC_SF 2 +#define MASTER_MDP0 3 +#define MASTER_MDP1 4 +#define MASTER_CNOC_MNOC_HF_CFG 5 +#define MASTER_CNOC_MNOC_SF_CFG 6 +#define MASTER_VIDEO_P0 7 +#define MASTER_VIDEO_PROC 8 +#define MASTER_VIDEO_V_PROC 9 +#define SLAVE_MNOC_HF_MEM_NOC 10 +#define SLAVE_MNOC_SF_MEM_NOC 11 +#define SLAVE_SERVICE_MNOC_HF 12 +#define SLAVE_SERVICE_MNOC_SF 13 + +#define MASTER_CDSP_NOC_CFG 0 +#define MASTER_CDSP_PROC 1 +#define SLAVE_HCP_A 2 +#define SLAVE_CDSP_MEM_NOC 3 +#define SLAVE_SERVICE_NSP_NOC 4 + +#define MASTER_PCIE_0 0 +#define MASTER_PCIE_1 1 +#define SLAVE_ANOC_PCIE_GEM_NOC 2 + +#define MASTER_GIC_AHB 0 +#define MASTER_A1NOC_SNOC 1 +#define MASTER_A2NOC_SNOC 2 +#define MASTER_LPASS_ANOC 3 +#define MASTER_SNOC_CFG 4 +#define MASTER_PIMEM 5 +#define MASTER_GIC 6 +#define SLAVE_SNOC_GEM_NOC_GC 7 +#define SLAVE_SNOC_GEM_NOC_SF 8 +#define SLAVE_SERVICE_SNOC 9 + +#endif diff --git a/include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h b/include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h new file mode 100644 index 000000000000..aec7cbb7cd70 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sar2130p-rpmh.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2024, Linaro Ltd. 
+ */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SAR2130P_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SAR2130P_H + +#define MASTER_QUP_CORE_0 0 +#define MASTER_QUP_CORE_1 1 +#define SLAVE_QUP_CORE_0 2 +#define SLAVE_QUP_CORE_1 3 + +#define MASTER_GEM_NOC_CNOC 0 +#define MASTER_GEM_NOC_PCIE_SNOC 1 +#define MASTER_QDSS_DAP 2 +#define SLAVE_AHB2PHY_SOUTH 3 +#define SLAVE_AOSS 4 +#define SLAVE_CAMERA_CFG 5 +#define SLAVE_CLK_CTL 6 +#define SLAVE_CDSP_CFG 7 +#define SLAVE_RBCPR_CX_CFG 8 +#define SLAVE_RBCPR_MMCX_CFG 9 +#define SLAVE_RBCPR_MXA_CFG 10 +#define SLAVE_RBCPR_MXC_CFG 11 +#define SLAVE_CPR_NSPCX 12 +#define SLAVE_CRYPTO_0_CFG 13 +#define SLAVE_CX_RDPM 14 +#define SLAVE_DISPLAY_CFG 15 +#define SLAVE_GFX3D_CFG 16 +#define SLAVE_IMEM_CFG 17 +#define SLAVE_IPC_ROUTER_CFG 18 +#define SLAVE_LPASS 19 +#define SLAVE_MX_RDPM 20 +#define SLAVE_PCIE_0_CFG 21 +#define SLAVE_PCIE_1_CFG 22 +#define SLAVE_PDM 23 +#define SLAVE_PIMEM_CFG 24 +#define SLAVE_PRNG 25 +#define SLAVE_QDSS_CFG 26 +#define SLAVE_QSPI_0 27 +#define SLAVE_QUP_0 28 +#define SLAVE_QUP_1 29 +#define SLAVE_SDCC_1 30 +#define SLAVE_TCSR 31 +#define SLAVE_TLMM 32 +#define SLAVE_TME_CFG 33 +#define SLAVE_USB3_0 34 +#define SLAVE_VENUS_CFG 35 +#define SLAVE_VSENSE_CTRL_CFG 36 +#define SLAVE_WLAN_Q6_CFG 37 +#define SLAVE_DDRSS_CFG 38 +#define SLAVE_CNOC_MNOC_CFG 39 +#define SLAVE_SNOC_CFG 40 +#define SLAVE_IMEM 41 +#define SLAVE_PIMEM 42 +#define SLAVE_SERVICE_CNOC 43 +#define SLAVE_PCIE_0 44 +#define SLAVE_PCIE_1 45 +#define SLAVE_QDSS_STM 46 +#define SLAVE_TCU 47 + +#define MASTER_GPU_TCU 0 +#define MASTER_SYS_TCU 1 +#define MASTER_APPSS_PROC 2 +#define MASTER_GFX3D 3 +#define MASTER_MNOC_HF_MEM_NOC 4 +#define MASTER_MNOC_SF_MEM_NOC 5 +#define MASTER_COMPUTE_NOC 6 +#define MASTER_ANOC_PCIE_GEM_NOC 7 +#define MASTER_SNOC_GC_MEM_NOC 8 +#define MASTER_SNOC_SF_MEM_NOC 9 +#define MASTER_WLAN_Q6 10 +#define SLAVE_GEM_NOC_CNOC 11 +#define SLAVE_LLCC 12 +#define SLAVE_MEM_NOC_PCIE_SNOC 13 + +#define MASTER_CNOC_LPASS_AG_NOC 0 +#define MASTER_LPASS_PROC 1 +#define SLAVE_LPASS_CORE_CFG 2 +#define SLAVE_LPASS_LPI_CFG 3 +#define SLAVE_LPASS_MPU_CFG 4 +#define SLAVE_LPASS_TOP_CFG 5 +#define SLAVE_LPASS_SNOC 6 +#define SLAVE_SERVICES_LPASS_AML_NOC 7 +#define SLAVE_SERVICE_LPASS_AG_NOC 8 + +#define MASTER_LLCC 0 +#define SLAVE_EBI1 1 + +#define MASTER_CAMNOC_HF 0 +#define MASTER_CAMNOC_ICP 1 +#define MASTER_CAMNOC_SF 2 +#define MASTER_LSR 3 +#define MASTER_MDP 4 +#define MASTER_CNOC_MNOC_CFG 5 +#define MASTER_VIDEO 6 +#define MASTER_VIDEO_CV_PROC 7 +#define MASTER_VIDEO_PROC 8 +#define MASTER_VIDEO_V_PROC 9 +#define SLAVE_MNOC_HF_MEM_NOC 10 +#define SLAVE_MNOC_SF_MEM_NOC 11 +#define SLAVE_SERVICE_MNOC 12 + +#define MASTER_CDSP_NOC_CFG 0 +#define MASTER_CDSP_PROC 1 +#define SLAVE_CDSP_MEM_NOC 2 +#define SLAVE_SERVICE_NSP_NOC 3 + +#define MASTER_PCIE_0 0 +#define MASTER_PCIE_1 1 +#define SLAVE_ANOC_PCIE_GEM_NOC 2 + +#define MASTER_GIC_AHB 0 +#define MASTER_QDSS_BAM 1 +#define MASTER_QSPI_0 2 +#define MASTER_QUP_0 3 +#define MASTER_QUP_1 4 +#define MASTER_A2NOC_SNOC 5 +#define MASTER_CNOC_DATAPATH 6 +#define MASTER_LPASS_ANOC 7 +#define MASTER_SNOC_CFG 8 +#define MASTER_CRYPTO 9 +#define MASTER_PIMEM 10 +#define MASTER_GIC 11 +#define MASTER_QDSS_ETR 12 +#define MASTER_QDSS_ETR_1 13 +#define MASTER_SDCC_1 14 +#define MASTER_USB3_0 15 +#define SLAVE_A2NOC_SNOC 16 +#define SLAVE_SNOC_GEM_NOC_GC 17 +#define SLAVE_SNOC_GEM_NOC_SF 18 +#define SLAVE_SERVICE_SNOC 19 + +#endif diff --git a/include/dt-bindings/power/mediatek,mt6735-power-controller.h 
b/include/dt-bindings/power/mediatek,mt6735-power-controller.h new file mode 100644 index 000000000000..6957075fcb9e --- /dev/null +++ b/include/dt-bindings/power/mediatek,mt6735-power-controller.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_POWER_MT6735_POWER_CONTROLLER_H +#define _DT_BINDINGS_POWER_MT6735_POWER_CONTROLLER_H + +#define MT6735_POWER_DOMAIN_MD1 0 +#define MT6735_POWER_DOMAIN_CONN 1 +#define MT6735_POWER_DOMAIN_DIS 2 +#define MT6735_POWER_DOMAIN_MFG 3 +#define MT6735_POWER_DOMAIN_ISP 4 +#define MT6735_POWER_DOMAIN_VDE 5 +#define MT6735_POWER_DOMAIN_VEN 6 + +#endif diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h index 608087fb9a3d..df599bf46220 100644 --- a/include/dt-bindings/power/qcom-rpmpd.h +++ b/include/dt-bindings/power/qcom-rpmpd.h @@ -218,6 +218,7 @@ /* SDM845 Power Domain performance levels */ #define RPMH_REGULATOR_LEVEL_RETENTION 16 #define RPMH_REGULATOR_LEVEL_MIN_SVS 48 +#define RPMH_REGULATOR_LEVEL_LOW_SVS_D3 50 #define RPMH_REGULATOR_LEVEL_LOW_SVS_D2 52 #define RPMH_REGULATOR_LEVEL_LOW_SVS_D1 56 #define RPMH_REGULATOR_LEVEL_LOW_SVS_D0 60 @@ -238,6 +239,7 @@ #define RPMH_REGULATOR_LEVEL_TURBO_L1 416 #define RPMH_REGULATOR_LEVEL_TURBO_L2 432 #define RPMH_REGULATOR_LEVEL_TURBO_L3 448 +#define RPMH_REGULATOR_LEVEL_TURBO_L4 452 #define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464 #define RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR 480 diff --git a/include/dt-bindings/reset/aspeed,ast2700-scu.h b/include/dt-bindings/reset/aspeed,ast2700-scu.h new file mode 100644 index 000000000000..d53c719b7a66 --- /dev/null +++ b/include/dt-bindings/reset/aspeed,ast2700-scu.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Device Tree binding constants for AST2700 reset controller. + * + * Copyright (c) 2024 Aspeed Technology Inc. 
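[Editor's aside: the MT6735_POWER_DOMAIN_* indices above are consumed through a "power-domains = <&spm MT6735_POWER_DOMAIN_VDE>" style phandle; the attached driver then sees only runtime PM, with genpd powering the domain underneath. A minimal sketch under that assumption (device and domain choice are illustrative):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

/* Hypothetical consumer device living in a genpd-managed domain. */
static int example_domain_user(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0)
		return ret;

	/* ... the domain (e.g. VDE) stays powered while work runs ... */

	pm_runtime_put(&pdev->dev);
	return 0;
}

The AST2700 reset header continues below.]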
+ */ + +#ifndef _MACH_ASPEED_AST2700_RESET_H_ +#define _MACH_ASPEED_AST2700_RESET_H_ + +/* SOC0 */ +#define SCU0_RESET_SDRAM 0 +#define SCU0_RESET_DDRPHY 1 +#define SCU0_RESET_RSA 2 +#define SCU0_RESET_SHA3 3 +#define SCU0_RESET_HACE 4 +#define SCU0_RESET_SOC 5 +#define SCU0_RESET_VIDEO 6 +#define SCU0_RESET_2D 7 +#define SCU0_RESET_PCIS 8 +#define SCU0_RESET_RVAS0 9 +#define SCU0_RESET_RVAS1 10 +#define SCU0_RESET_SM3 11 +#define SCU0_RESET_SM4 12 +#define SCU0_RESET_CRT0 13 +#define SCU0_RESET_ECC 14 +#define SCU0_RESET_DP_PCI 15 +#define SCU0_RESET_UFS 16 +#define SCU0_RESET_EMMC 17 +#define SCU0_RESET_PCIE1RST 18 +#define SCU0_RESET_PCIE1RSTOE 19 +#define SCU0_RESET_PCIE0RST 20 +#define SCU0_RESET_PCIE0RSTOE 21 +#define SCU0_RESET_JTAG 22 +#define SCU0_RESET_MCTP0 23 +#define SCU0_RESET_MCTP1 24 +#define SCU0_RESET_XDMA0 25 +#define SCU0_RESET_XDMA1 26 +#define SCU0_RESET_H2X1 27 +#define SCU0_RESET_DP 28 +#define SCU0_RESET_DP_MCU 29 +#define SCU0_RESET_SSP 30 +#define SCU0_RESET_H2X0 31 +#define SCU0_RESET_PORTA_VHUB 32 +#define SCU0_RESET_PORTA_PHY3 33 +#define SCU0_RESET_PORTA_XHCI 34 +#define SCU0_RESET_PORTB_VHUB 35 +#define SCU0_RESET_PORTB_PHY3 36 +#define SCU0_RESET_PORTB_XHCI 37 +#define SCU0_RESET_PORTA_VHUB_EHCI 38 +#define SCU0_RESET_PORTB_VHUB_EHCI 39 +#define SCU0_RESET_UHCI 40 +#define SCU0_RESET_TSP 41 +#define SCU0_RESET_E2M0 42 +#define SCU0_RESET_E2M1 43 +#define SCU0_RESET_VLINK 44 + +/* SOC1 */ +#define SCU1_RESET_LPC0 0 +#define SCU1_RESET_LPC1 1 +#define SCU1_RESET_MII 2 +#define SCU1_RESET_PECI 3 +#define SCU1_RESET_PWM 4 +#define SCU1_RESET_MAC0 5 +#define SCU1_RESET_MAC1 6 +#define SCU1_RESET_MAC2 7 +#define SCU1_RESET_ADC 8 +#define SCU1_RESET_SD 9 +#define SCU1_RESET_ESPI0 10 +#define SCU1_RESET_ESPI1 11 +#define SCU1_RESET_JTAG1 12 +#define SCU1_RESET_SPI0 13 +#define SCU1_RESET_SPI1 14 +#define SCU1_RESET_SPI2 15 +#define SCU1_RESET_I3C0 16 +#define SCU1_RESET_I3C1 17 +#define SCU1_RESET_I3C2 18 +#define SCU1_RESET_I3C3 19 +#define SCU1_RESET_I3C4 20 +#define SCU1_RESET_I3C5 21 +#define SCU1_RESET_I3C6 22 +#define SCU1_RESET_I3C7 23 +#define SCU1_RESET_I3C8 24 +#define SCU1_RESET_I3C9 25 +#define SCU1_RESET_I3C10 26 +#define SCU1_RESET_I3C11 27 +#define SCU1_RESET_I3C12 28 +#define SCU1_RESET_I3C13 29 +#define SCU1_RESET_I3C14 30 +#define SCU1_RESET_I3C15 31 +#define SCU1_RESET_MCU0 32 +#define SCU1_RESET_MCU1 33 +#define SCU1_RESET_H2A_SPI1 34 +#define SCU1_RESET_H2A_SPI2 35 +#define SCU1_RESET_UART0 36 +#define SCU1_RESET_UART1 37 +#define SCU1_RESET_UART2 38 +#define SCU1_RESET_UART3 39 +#define SCU1_RESET_I2C_FILTER 40 +#define SCU1_RESET_CALIPTRA 41 +#define SCU1_RESET_XDMA 42 +#define SCU1_RESET_FSI 43 +#define SCU1_RESET_CAN 44 +#define SCU1_RESET_MCTP 45 +#define SCU1_RESET_I2C 46 +#define SCU1_RESET_UART6 47 +#define SCU1_RESET_UART7 48 +#define SCU1_RESET_UART8 49 +#define SCU1_RESET_UART9 50 +#define SCU1_RESET_LTPI0 51 +#define SCU1_RESET_VGAL 52 +#define SCU1_RESET_LTPI1 53 +#define SCU1_RESET_ACE 54 +#define SCU1_RESET_E2M 55 +#define SCU1_RESET_UHCI 56 +#define SCU1_RESET_PORTC_USB2UART 57 +#define SCU1_RESET_PORTC_VHUB_EHCI 58 +#define SCU1_RESET_PORTD_USB2UART 59 +#define SCU1_RESET_PORTD_VHUB_EHCI 60 +#define SCU1_RESET_H2X 61 +#define SCU1_RESET_I3CDMA 62 +#define SCU1_RESET_PCIE2RST 63 + +#endif /* _MACH_ASPEED_AST2700_RESET_H_ */ diff --git a/include/dt-bindings/reset/mediatek,mt6735-infracfg.h b/include/dt-bindings/reset/mediatek,mt6735-infracfg.h new file mode 100644 index 000000000000..9df969090377 --- /dev/null +++ 
b/include/dt-bindings/reset/mediatek,mt6735-infracfg.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_RESET_MT6735_INFRACFG_H +#define _DT_BINDINGS_RESET_MT6735_INFRACFG_H + +#define MT6735_INFRA_RST0_EMI_REG 0 +#define MT6735_INFRA_RST0_DRAMC0_AO 1 +#define MT6735_INFRA_RST0_AP_CIRQ_EINT 2 +#define MT6735_INFRA_RST0_APXGPT 3 +#define MT6735_INFRA_RST0_SCPSYS 4 +#define MT6735_INFRA_RST0_KP 5 +#define MT6735_INFRA_RST0_PMIC_WRAP 6 +#define MT6735_INFRA_RST0_CLDMA_AO_TOP 7 +#define MT6735_INFRA_RST0_USBSIF_TOP 8 +#define MT6735_INFRA_RST0_EMI 9 +#define MT6735_INFRA_RST0_CCIF 10 +#define MT6735_INFRA_RST0_DRAMC0 11 +#define MT6735_INFRA_RST0_EMI_AO_REG 12 +#define MT6735_INFRA_RST0_CCIF_AO 13 +#define MT6735_INFRA_RST0_TRNG 14 +#define MT6735_INFRA_RST0_SYS_CIRQ 15 +#define MT6735_INFRA_RST0_GCE 16 +#define MT6735_INFRA_RST0_M4U 17 +#define MT6735_INFRA_RST0_CCIF1 18 +#define MT6735_INFRA_RST0_CLDMA_TOP_PD 19 + +#endif diff --git a/include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h b/include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h new file mode 100644 index 000000000000..c489242b226e --- /dev/null +++ b/include/dt-bindings/reset/mediatek,mt6735-mfgcfg.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_RESET_MT6735_MFGCFG_H +#define _DT_BINDINGS_RESET_MT6735_MFGCFG_H + +#define MT6735_MFG_RST0_AXI 0 +#define MT6735_MFG_RST0_G3D 1 + +#endif /* _DT_BINDINGS_RESET_MT6735_MFGCFG_H */ diff --git a/include/dt-bindings/reset/mediatek,mt6735-pericfg.h b/include/dt-bindings/reset/mediatek,mt6735-pericfg.h new file mode 100644 index 000000000000..a62bb192835a --- /dev/null +++ b/include/dt-bindings/reset/mediatek,mt6735-pericfg.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_RESET_MT6735_PERICFG_H +#define _DT_BINDINGS_RESET_MT6735_PERICFG_H + +#define MT6735_PERI_RST0_UART0 0 +#define MT6735_PERI_RST0_UART1 1 +#define MT6735_PERI_RST0_UART2 2 +#define MT6735_PERI_RST0_UART3 3 +#define MT6735_PERI_RST0_UART4 4 +#define MT6735_PERI_RST0_BTIF 5 +#define MT6735_PERI_RST0_DISP_PWM_PERI 6 +#define MT6735_PERI_RST0_PWM 7 +#define MT6735_PERI_RST0_AUXADC 8 +#define MT6735_PERI_RST0_DMA 9 +#define MT6735_PERI_RST0_IRDA 10 +#define MT6735_PERI_RST0_IRTX 11 +#define MT6735_PERI_RST0_THERM 12 +#define MT6735_PERI_RST0_MSDC2 13 +#define MT6735_PERI_RST0_MSDC3 14 +#define MT6735_PERI_RST0_MSDC0 15 +#define MT6735_PERI_RST0_MSDC1 16 +#define MT6735_PERI_RST0_I2C0 17 +#define MT6735_PERI_RST0_I2C1 18 +#define MT6735_PERI_RST0_I2C2 19 +#define MT6735_PERI_RST0_I2C3 20 +#define MT6735_PERI_RST0_USB 21 + +#define MT6735_PERI_RST1_SPI0 22 + +#endif diff --git a/include/dt-bindings/reset/mediatek,mt6735-vdecsys.h b/include/dt-bindings/reset/mediatek,mt6735-vdecsys.h new file mode 100644 index 000000000000..b6ae5d249192 --- /dev/null +++ b/include/dt-bindings/reset/mediatek,mt6735-vdecsys.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ + +#ifndef _DT_BINDINGS_RESET_MT6735_VDECSYS_H +#define _DT_BINDINGS_RESET_MT6735_VDECSYS_H + +#define MT6735_VDEC_RST0_VDEC 0 +#define MT6735_VDEC_RST1_SMI_LARB1 1 + +#endif /* _DT_BINDINGS_RESET_MT6735_VDECSYS_H */ diff --git a/include/dt-bindings/reset/qcom,ipq5424-gcc.h b/include/dt-bindings/reset/qcom,ipq5424-gcc.h new file mode 100644 index 000000000000..16a72771c79a --- /dev/null +++ b/include/dt-bindings/reset/qcom,ipq5424-gcc.h @@ -0,0 +1,310 @@ +/* SPDX-License-Identifier: 
(GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2018,2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_RESET_IPQ_GCC_IPQ5424_H +#define _DT_BINDINGS_RESET_IPQ_GCC_IPQ5424_H + +#define GCC_QUPV3_BCR 0 +#define GCC_QUPV3_I2C0_BCR 1 +#define GCC_QUPV3_UART0_BCR 2 +#define GCC_QUPV3_I2C1_BCR 3 +#define GCC_QUPV3_UART1_BCR 4 +#define GCC_QUPV3_SPI0_BCR 5 +#define GCC_QUPV3_SPI1_BCR 6 +#define GCC_IMEM_BCR 7 +#define GCC_TME_BCR 8 +#define GCC_DDRSS_BCR 9 +#define GCC_PRNG_BCR 10 +#define GCC_BOOT_ROM_BCR 11 +#define GCC_NSS_BCR 12 +#define GCC_MDIO_BCR 13 +#define GCC_UNIPHY0_BCR 14 +#define GCC_UNIPHY1_BCR 15 +#define GCC_UNIPHY2_BCR 16 +#define GCC_WCSS_BCR 17 +#define GCC_SEC_CTRL_BCR 19 +#define GCC_TME_SEC_BUS_BCR 20 +#define GCC_ADSS_BCR 21 +#define GCC_LPASS_BCR 22 +#define GCC_PCIE0_BCR 23 +#define GCC_PCIE0_LINK_DOWN_BCR 24 +#define GCC_PCIE0PHY_PHY_BCR 25 +#define GCC_PCIE0_PHY_BCR 26 +#define GCC_PCIE1_BCR 27 +#define GCC_PCIE1_LINK_DOWN_BCR 28 +#define GCC_PCIE1PHY_PHY_BCR 29 +#define GCC_PCIE1_PHY_BCR 30 +#define GCC_PCIE2_BCR 31 +#define GCC_PCIE2_LINK_DOWN_BCR 32 +#define GCC_PCIE2PHY_PHY_BCR 33 +#define GCC_PCIE2_PHY_BCR 34 +#define GCC_PCIE3_BCR 35 +#define GCC_PCIE3_LINK_DOWN_BCR 36 +#define GCC_PCIE3PHY_PHY_BCR 37 +#define GCC_PCIE3_PHY_BCR 38 +#define GCC_USB_BCR 39 +#define GCC_QUSB2_0_PHY_BCR 40 +#define GCC_USB0_PHY_BCR 41 +#define GCC_USB3PHY_0_PHY_BCR 42 +#define GCC_QDSS_BCR 43 +#define GCC_SNOC_BCR 44 +#define GCC_ANOC_BCR 45 +#define GCC_PCNOC_BCR 46 +#define GCC_PCNOC_BUS_TIMEOUT0_BCR 47 +#define GCC_PCNOC_BUS_TIMEOUT1_BCR 48 +#define GCC_PCNOC_BUS_TIMEOUT2_BCR 49 +#define GCC_PCNOC_BUS_TIMEOUT3_BCR 50 +#define GCC_PCNOC_BUS_TIMEOUT4_BCR 51 +#define GCC_PCNOC_BUS_TIMEOUT5_BCR 52 +#define GCC_PCNOC_BUS_TIMEOUT6_BCR 53 +#define GCC_PCNOC_BUS_TIMEOUT7_BCR 54 +#define GCC_PCNOC_BUS_TIMEOUT8_BCR 55 +#define GCC_PCNOC_BUS_TIMEOUT9_BCR 56 +#define GCC_QPIC_BCR 57 +#define GCC_SDCC_BCR 58 +#define GCC_DCC_BCR 59 +#define GCC_SPDM_BCR 60 +#define GCC_MPM_BCR 61 +#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 62 +#define GCC_RBCPR_BCR 63 +#define GCC_CMN_BLK_BCR 64 +#define GCC_TCSR_BCR 65 +#define GCC_TLMM_BCR 66 +#define GCC_QUPV3_AHB_MST_ARES 67 +#define GCC_QUPV3_CORE_ARES 68 +#define GCC_QUPV3_2X_CORE_ARES 69 +#define GCC_QUPV3_SLEEP_ARES 70 +#define GCC_QUPV3_AHB_SLV_ARES 71 +#define GCC_QUPV3_I2C0_ARES 72 +#define GCC_QUPV3_UART0_ARES 73 +#define GCC_QUPV3_I2C1_ARES 74 +#define GCC_QUPV3_UART1_ARES 75 +#define GCC_QUPV3_SPI0_ARES 76 +#define GCC_QUPV3_SPI1_ARES 77 +#define GCC_DEBUG_ARES 78 +#define GCC_GP1_ARES 79 +#define GCC_GP2_ARES 80 +#define GCC_GP3_ARES 81 +#define GCC_IMEM_AXI_ARES 82 +#define GCC_IMEM_CFG_AHB_ARES 83 +#define GCC_TME_ARES 84 +#define GCC_TME_TS_ARES 85 +#define GCC_TME_SLOW_ARES 86 +#define GCC_TME_RTC_TOGGLE_ARES 87 +#define GCC_TIC_ARES 88 +#define GCC_PRNG_AHB_ARES 89 +#define GCC_BOOT_ROM_AHB_ARES 90 +#define GCC_NSSNOC_ATB_ARES 91 +#define GCC_NSS_TS_ARES 92 +#define GCC_NSSNOC_QOSGEN_REF_ARES 93 +#define GCC_NSSNOC_TIMEOUT_REF_ARES 94 +#define GCC_NSSNOC_MEMNOC_ARES 95 +#define GCC_NSSNOC_SNOC_ARES 96 +#define GCC_NSSCFG_ARES 97 +#define GCC_NSSNOC_NSSCC_ARES 98 +#define GCC_NSSCC_ARES 99 +#define GCC_MDIO_AHB_ARES 100 +#define GCC_UNIPHY0_SYS_ARES 101 +#define GCC_UNIPHY0_AHB_ARES 102 +#define GCC_UNIPHY1_SYS_ARES 103 +#define GCC_UNIPHY1_AHB_ARES 104 +#define GCC_UNIPHY2_SYS_ARES 105 +#define GCC_UNIPHY2_AHB_ARES 106 +#define 
GCC_NSSNOC_XO_DCD_ARES 107 +#define GCC_NSSNOC_SNOC_1_ARES 108 +#define GCC_NSSNOC_PCNOC_1_ARES 109 +#define GCC_NSSNOC_MEMNOC_1_ARES 110 +#define GCC_DDRSS_ATB_ARES 111 +#define GCC_DDRSS_AHB_ARES 112 +#define GCC_GEMNOC_AHB_ARES 113 +#define GCC_GEMNOC_Q6_AXI_ARES 114 +#define GCC_GEMNOC_NSSNOC_ARES 115 +#define GCC_GEMNOC_SNOC_ARES 116 +#define GCC_GEMNOC_APSS_ARES 117 +#define GCC_GEMNOC_QOSGEN_EXTREF_ARES 118 +#define GCC_GEMNOC_TS_ARES 119 +#define GCC_DDRSS_SMS_SLOW_ARES 120 +#define GCC_GEMNOC_CNOC_ARES 121 +#define GCC_GEMNOC_XO_DBG_ARES 122 +#define GCC_GEMNOC_ANOC_ARES 123 +#define GCC_DDRSS_LLCC_ATB_ARES 124 +#define GCC_LLCC_TPDM_CFG_ARES 125 +#define GCC_TME_BUS_ARES 126 +#define GCC_SEC_CTRL_ACC_ARES 127 +#define GCC_SEC_CTRL_ARES 128 +#define GCC_SEC_CTRL_SENSE_ARES 129 +#define GCC_SEC_CTRL_AHB_ARES 130 +#define GCC_SEC_CTRL_BOOT_ROM_PATCH_ARES 131 +#define GCC_ADSS_PWM_ARES 132 +#define GCC_TME_ATB_ARES 133 +#define GCC_TME_DBGAPB_ARES 134 +#define GCC_TME_DEBUG_ARES 135 +#define GCC_TME_AT_ARES 136 +#define GCC_TME_APB_ARES 137 +#define GCC_TME_DMI_DBG_HS_ARES 138 +#define GCC_APSS_AHB_ARES 139 +#define GCC_APSS_AXI_ARES 140 +#define GCC_CPUSS_TRIG_ARES 141 +#define GCC_APSS_DBG_ARES 142 +#define GCC_APSS_TS_ARES 143 +#define GCC_APSS_ATB_ARES 144 +#define GCC_Q6_AXIM_ARES 145 +#define GCC_Q6_AXIS_ARES 146 +#define GCC_Q6_AHB_ARES 147 +#define GCC_Q6_AHB_S_ARES 148 +#define GCC_Q6SS_ATBM_ARES 149 +#define GCC_Q6_TSCTR_1TO2_ARES 150 +#define GCC_Q6SS_PCLKDBG_ARES 151 +#define GCC_Q6SS_TRIG_ARES 152 +#define GCC_Q6SS_BOOT_CBCR_ARES 153 +#define GCC_WCSS_DBG_IFC_APB_ARES 154 +#define GCC_WCSS_DBG_IFC_ATB_ARES 155 +#define GCC_WCSS_DBG_IFC_NTS_ARES 156 +#define GCC_WCSS_DBG_IFC_DAPBUS_ARES 157 +#define GCC_WCSS_DBG_IFC_APB_BDG_ARES 158 +#define GCC_WCSS_DBG_IFC_NTS_BDG_ARES 159 +#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_ARES 160 +#define GCC_WCSS_ECAHB_ARES 161 +#define GCC_WCSS_ACMT_ARES 162 +#define GCC_WCSS_AHB_S_ARES 163 +#define GCC_WCSS_AXI_M_ARES 164 +#define GCC_PCNOC_WAPSS_ARES 165 +#define GCC_SNOC_WAPSS_ARES 166 +#define GCC_LPASS_SWAY_ARES 167 +#define GCC_LPASS_CORE_AXIM_ARES 168 +#define GCC_PCIE0_AHB_ARES 169 +#define GCC_PCIE0_AXI_M_ARES 170 +#define GCC_PCIE0_AXI_S_ARES 171 +#define GCC_PCIE0_AXI_S_BRIDGE_ARES 172 +#define GCC_PCIE0_PIPE_ARES 173 +#define GCC_PCIE0_AUX_ARES 174 +#define GCC_PCIE1_AHB_ARES 175 +#define GCC_PCIE1_AXI_M_ARES 176 +#define GCC_PCIE1_AXI_S_ARES 177 +#define GCC_PCIE1_AXI_S_BRIDGE_ARES 178 +#define GCC_PCIE1_PIPE_ARES 179 +#define GCC_PCIE1_AUX_ARES 180 +#define GCC_PCIE2_AHB_ARES 181 +#define GCC_PCIE2_AXI_M_ARES 182 +#define GCC_PCIE2_AXI_S_ARES 183 +#define GCC_PCIE2_AXI_S_BRIDGE_ARES 184 +#define GCC_PCIE2_PIPE_ARES 185 +#define GCC_PCIE2_AUX_ARES 186 +#define GCC_PCIE3_AHB_ARES 187 +#define GCC_PCIE3_AXI_M_ARES 188 +#define GCC_PCIE3_AXI_S_ARES 189 +#define GCC_PCIE3_AXI_S_BRIDGE_ARES 190 +#define GCC_PCIE3_PIPE_ARES 191 +#define GCC_PCIE3_AUX_ARES 192 +#define GCC_USB0_MASTER_ARES 193 +#define GCC_USB0_AUX_ARES 194 +#define GCC_USB0_MOCK_UTMI_ARES 195 +#define GCC_USB0_PIPE_ARES 196 +#define GCC_USB0_SLEEP_ARES 197 +#define GCC_USB0_PHY_CFG_AHB_ARES 198 +#define GCC_QDSS_AT_ARES 199 +#define GCC_QDSS_STM_ARES 200 +#define GCC_QDSS_TRACECLKIN_ARES 201 +#define GCC_QDSS_TSCTR_DIV2_ARES 202 +#define GCC_QDSS_TSCTR_DIV3_ARES 203 +#define GCC_QDSS_TSCTR_DIV4_ARES 204 +#define GCC_QDSS_TSCTR_DIV8_ARES 205 +#define GCC_QDSS_TSCTR_DIV16_ARES 206 +#define GCC_QDSS_DAP_ARES 207 +#define GCC_QDSS_APB2JTAG_ARES 208 +#define 
GCC_QDSS_ETR_USB_ARES 209 +#define GCC_QDSS_DAP_AHB_ARES 210 +#define GCC_QDSS_CFG_AHB_ARES 211 +#define GCC_QDSS_EUD_AT_ARES 212 +#define GCC_QDSS_TS_ARES 213 +#define GCC_QDSS_USB_ARES 214 +#define GCC_SYS_NOC_AXI_ARES 215 +#define GCC_SNOC_QOSGEN_EXTREF_ARES 216 +#define GCC_CNOC_LPASS_CFG_ARES 217 +#define GCC_SYS_NOC_AT_ARES 218 +#define GCC_SNOC_PCNOC_AHB_ARES 219 +#define GCC_SNOC_TME_ARES 220 +#define GCC_SNOC_XO_DCD_ARES 221 +#define GCC_SNOC_TS_ARES 222 +#define GCC_ANOC0_AXI_ARES 223 +#define GCC_ANOC_PCIE0_1LANE_M_ARES 224 +#define GCC_ANOC_PCIE2_2LANE_M_ARES 225 +#define GCC_ANOC_PCIE1_1LANE_M_ARES 226 +#define GCC_ANOC_PCIE3_2LANE_M_ARES 227 +#define GCC_ANOC_PCNOC_AHB_ARES 228 +#define GCC_ANOC_QOSGEN_EXTREF_ARES 229 +#define GCC_ANOC_XO_DCD_ARES 230 +#define GCC_SNOC_XO_DBG_ARES 231 +#define GCC_AGGRNOC_ATB_ARES 232 +#define GCC_AGGRNOC_TS_ARES 233 +#define GCC_USB0_EUD_AT_ARES 234 +#define GCC_PCNOC_TIC_ARES 235 +#define GCC_PCNOC_AHB_ARES 236 +#define GCC_PCNOC_XO_DBG_ARES 237 +#define GCC_SNOC_LPASS_ARES 238 +#define GCC_PCNOC_AT_ARES 239 +#define GCC_PCNOC_XO_DCD_ARES 240 +#define GCC_PCNOC_TS_ARES 241 +#define GCC_PCNOC_BUS_TIMEOUT0_AHB_ARES 242 +#define GCC_PCNOC_BUS_TIMEOUT1_AHB_ARES 243 +#define GCC_PCNOC_BUS_TIMEOUT2_AHB_ARES 244 +#define GCC_PCNOC_BUS_TIMEOUT3_AHB_ARES 245 +#define GCC_PCNOC_BUS_TIMEOUT4_AHB_ARES 246 +#define GCC_PCNOC_BUS_TIMEOUT5_AHB_ARES 247 +#define GCC_PCNOC_BUS_TIMEOUT6_AHB_ARES 248 +#define GCC_PCNOC_BUS_TIMEOUT7_AHB_ARES 249 +#define GCC_Q6_AXIM_RESET 250 +#define GCC_Q6_AXIS_RESET 251 +#define GCC_Q6_AHB_S_RESET 252 +#define GCC_Q6_AHB_RESET 253 +#define GCC_Q6SS_DBG_RESET 254 +#define GCC_WCSS_ECAHB_RESET 255 +#define GCC_WCSS_DBG_BDG_RESET 256 +#define GCC_WCSS_DBG_RESET 257 +#define GCC_WCSS_AXI_M_RESET 258 +#define GCC_WCSS_AHB_S_RESET 259 +#define GCC_WCSS_ACMT_RESET 260 +#define GCC_WCSSAON_RESET 261 +#define GCC_PCIE0_PIPE_RESET 262 +#define GCC_PCIE0_CORE_STICKY_RESET 263 +#define GCC_PCIE0_AXI_S_STICKY_RESET 264 +#define GCC_PCIE0_AXI_S_RESET 265 +#define GCC_PCIE0_AXI_M_STICKY_RESET 266 +#define GCC_PCIE0_AXI_M_RESET 267 +#define GCC_PCIE0_AUX_RESET 268 +#define GCC_PCIE0_AHB_RESET 269 +#define GCC_PCIE1_PIPE_RESET 270 +#define GCC_PCIE1_CORE_STICKY_RESET 271 +#define GCC_PCIE1_AXI_S_STICKY_RESET 272 +#define GCC_PCIE1_AXI_S_RESET 273 +#define GCC_PCIE1_AXI_M_STICKY_RESET 274 +#define GCC_PCIE1_AXI_M_RESET 275 +#define GCC_PCIE1_AUX_RESET 276 +#define GCC_PCIE1_AHB_RESET 277 +#define GCC_PCIE2_PIPE_RESET 278 +#define GCC_PCIE2_CORE_STICKY_RESET 279 +#define GCC_PCIE2_AXI_S_STICKY_RESET 280 +#define GCC_PCIE2_AXI_S_RESET 281 +#define GCC_PCIE2_AXI_M_STICKY_RESET 282 +#define GCC_PCIE2_AXI_M_RESET 283 +#define GCC_PCIE2_AUX_RESET 284 +#define GCC_PCIE2_AHB_RESET 285 +#define GCC_PCIE3_PIPE_RESET 286 +#define GCC_PCIE3_CORE_STICKY_RESET 287 +#define GCC_PCIE3_AXI_S_STICKY_RESET 288 +#define GCC_PCIE3_AXI_S_RESET 289 +#define GCC_PCIE3_AXI_M_STICKY_RESET 290 +#define GCC_PCIE3_AXI_M_RESET 291 +#define GCC_PCIE3_AUX_RESET 292 +#define GCC_PCIE3_AHB_RESET 293 +#define GCC_NSS_PARTIAL_RESET 294 +#define GCC_UNIPHY0_XPCS_ARES 295 +#define GCC_UNIPHY1_XPCS_ARES 296 +#define GCC_UNIPHY2_XPCS_ARES 297 +#define GCC_USB1_BCR 298 +#define GCC_QUSB2_1_PHY_BCR 299 + +#endif diff --git a/include/dt-bindings/reset/qcom,sar2130p-gpucc.h b/include/dt-bindings/reset/qcom,sar2130p-gpucc.h new file mode 100644 index 000000000000..99ba5f092e2a --- /dev/null +++ b/include/dt-bindings/reset/qcom,sar2130p-gpucc.h @@ -0,0 +1,14 @@ +/* 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2024, Linaro Limited + */ + +#ifndef _DT_BINDINGS_RESET_QCOM_GPU_CC_SAR2130P_H +#define _DT_BINDINGS_RESET_QCOM_GPU_CC_SAR2130P_H + +#define GPUCC_GPU_CC_GX_BCR 0 +#define GPUCC_GPU_CC_ACD_BCR 1 +#define GPUCC_GPU_CC_GX_ACD_IROOT_BCR 2 + +#endif diff --git a/include/kunit/clk.h b/include/kunit/clk.h index 73bc99cefe7b..0afae7688157 100644 --- a/include/kunit/clk.h +++ b/include/kunit/clk.h @@ -25,4 +25,8 @@ int clk_hw_register_kunit(struct kunit *test, struct device *dev, struct clk_hw int of_clk_hw_register_kunit(struct kunit *test, struct device_node *node, struct clk_hw *hw); +int of_clk_add_hw_provider_kunit(struct kunit *test, struct device_node *np, + struct clk_hw *(*get)(struct of_phandle_args *clkspec, void *data), + void *data); + #endif diff --git a/include/kunit/of.h b/include/kunit/of.h index 48d4e70c9666..75a760a4e2a5 100644 --- a/include/kunit/of.h +++ b/include/kunit/of.h @@ -62,6 +62,13 @@ static inline int __of_overlay_apply_kunit(struct kunit *test, &unused); } +#define of_overlay_begin(overlay_name) __dtbo_##overlay_name##_begin +#define of_overlay_end(overlay_name) __dtbo_##overlay_name##_end + +#define OF_OVERLAY_DECLARE(overlay_name) \ + extern uint8_t of_overlay_begin(overlay_name)[]; \ + extern uint8_t of_overlay_end(overlay_name)[] \ + /** * of_overlay_apply_kunit() - Test managed of_overlay_fdt_apply() for built-in overlays * @test: test context @@ -104,12 +111,11 @@ static inline int __of_overlay_apply_kunit(struct kunit *test, */ #define of_overlay_apply_kunit(test, overlay_name) \ ({ \ - extern uint8_t __dtbo_##overlay_name##_begin[]; \ - extern uint8_t __dtbo_##overlay_name##_end[]; \ + OF_OVERLAY_DECLARE(overlay_name); \ \ __of_overlay_apply_kunit((test), \ - __dtbo_##overlay_name##_begin, \ - __dtbo_##overlay_name##_end); \ + of_overlay_begin(overlay_name), \ + of_overlay_end(overlay_name)); \ }) #endif diff --git a/include/kunit/skbuff.h b/include/kunit/skbuff.h index 44d12370939a..07784694357c 100644 --- a/include/kunit/skbuff.h +++ b/include/kunit/skbuff.h @@ -20,8 +20,9 @@ static void kunit_action_kfree_skb(void *p) * kunit_zalloc_skb() - Allocate and initialize a resource managed skb. * @test: The test case to which the skb belongs * @len: size to allocate + * @gfp: allocation flags * - * Allocate a new struct sk_buff with GFP_KERNEL, zero fill the give length + * Allocate a new struct sk_buff with gfp flags, zero fill the given length * and add it as a resource to the kunit test for automatic cleanup. 
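[Editor's aside: with @gfp now honored, kunit_zalloc_skb() callers can allocate under their own constraints instead of the previous hard-coded GFP_KERNEL. A minimal test sketch (the test name is hypothetical; the skb is a managed resource and needs no explicit free):

#include <kunit/skbuff.h>
#include <kunit/test.h>

/* The skb is registered on the test and freed automatically on exit. */
static void example_skb_alloc_test(struct kunit *test)
{
	struct sk_buff *skb = kunit_zalloc_skb(test, 64, GFP_KERNEL);

	KUNIT_ASSERT_NOT_NULL(test, skb);
}
]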
* * Returns: newly allocated SKB, or %NULL on error @@ -29,7 +30,7 @@ static void kunit_action_kfree_skb(void *p) static inline struct sk_buff *kunit_zalloc_skb(struct kunit *test, int len, gfp_t gfp) { - struct sk_buff *res = alloc_skb(len, GFP_KERNEL); + struct sk_buff *res = alloc_skb(len, gfp); if (!res || skb_pad(res, len)) return NULL; diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index c819c5d16613..fd650a8789b9 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -147,6 +147,9 @@ u64 timer_get_cval(struct arch_timer_context *ctxt); void kvm_timer_cpu_up(void); void kvm_timer_cpu_down(void); +/* CNTKCTL_EL1 valid bits as of DDI0487J.a */ +#define CNTKCTL_VALID_BITS (BIT(17) | GENMASK_ULL(9, 0)) + static inline bool has_cntpoff(void) { return (has_vhe() && cpus_have_final_cap(ARM64_HAS_ECV_CNTPOFF)); diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index e08aeec5d936..e61dd7dd2286 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -47,7 +47,8 @@ static __always_inline bool kvm_arm_support_pmu_v3(void) #define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS) u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx); void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val); -u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu); +u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu); +u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu); u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1); void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu); void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu); @@ -96,6 +97,8 @@ int kvm_arm_set_default_pmu(struct kvm *kvm); u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm); u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu); +bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx); +void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu); #else struct kvm_pmu { }; @@ -113,7 +116,11 @@ static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, } static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val) {} -static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) +static inline u64 kvm_pmu_implemented_counter_mask(struct kvm_vcpu *vcpu) +{ + return 0; +} +static inline u64 kvm_pmu_accessible_counter_mask(struct kvm_vcpu *vcpu) { return 0; } @@ -187,6 +194,13 @@ static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) return 0; } +static inline bool kvm_pmu_counter_is_hyp(struct kvm_vcpu *vcpu, unsigned int idx) +{ + return false; +} + +static inline void kvm_pmu_nested_transition(struct kvm_vcpu *vcpu) {} + #endif #endif diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index e8fb624013d1..cbaec804eb83 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -14,8 +14,10 @@ #define KVM_ARM_PSCI_0_2 PSCI_VERSION(0, 2) #define KVM_ARM_PSCI_1_0 PSCI_VERSION(1, 0) #define KVM_ARM_PSCI_1_1 PSCI_VERSION(1, 1) +#define KVM_ARM_PSCI_1_2 PSCI_VERSION(1, 2) +#define KVM_ARM_PSCI_1_3 PSCI_VERSION(1, 3) -#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_1 +#define KVM_ARM_PSCI_LATEST KVM_ARM_PSCI_1_3 static inline int kvm_psci_version(struct kvm_vcpu *vcpu) { diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index f5172549f9ba..3a8ccfda34d2 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -26,7 +26,6 @@ #define VGIC_NR_SGIS 16 #define VGIC_NR_PPIS 16 #define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) -#define 
VGIC_MAX_PRIVATE (VGIC_NR_PRIVATE_IRQS - 1) #define VGIC_MAX_SPI 1019 #define VGIC_MAX_RESERVED 1023 #define VGIC_MIN_LPI 8192 diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 4d5ee84c468b..05f39fbfa485 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1164,8 +1164,6 @@ int acpi_subsys_suspend_noirq(struct device *dev); int acpi_subsys_suspend(struct device *dev); int acpi_subsys_freeze(struct device *dev); int acpi_subsys_poweroff(struct device *dev); -void acpi_ec_mark_gpe_for_wake(void); -void acpi_ec_set_gpe_wake_mask(u8 action); int acpi_subsys_restore_early(struct device *dev); #else static inline int acpi_subsys_prepare(struct device *dev) { return 0; } @@ -1176,6 +1174,12 @@ static inline int acpi_subsys_suspend(struct device *dev) { return 0; } static inline int acpi_subsys_freeze(struct device *dev) { return 0; } static inline int acpi_subsys_poweroff(struct device *dev) { return 0; } static inline int acpi_subsys_restore_early(struct device *dev) { return 0; } +#endif + +#if defined(CONFIG_ACPI_EC) && defined(CONFIG_PM_SLEEP) +void acpi_ec_mark_gpe_for_wake(void); +void acpi_ec_set_gpe_wake_mask(u8 action); +#else static inline void acpi_ec_mark_gpe_for_wake(void) {} static inline void acpi_ec_set_gpe_wake_mask(u8 action) {} #endif @@ -1526,17 +1530,7 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) } #endif -#ifdef CONFIG_ARM64 -void acpi_arm_init(void); -#else -static inline void acpi_arm_init(void) { } -#endif - -#ifdef CONFIG_RISCV -void acpi_riscv_init(void); -#else -static inline void acpi_riscv_init(void) { } -#endif +void acpi_arch_init(void); #ifdef CONFIG_ACPI_PCC void acpi_init_pcc(void); diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h index 72cedb916a9c..e748b2877602 100644 --- a/include/linux/acpi_dma.h +++ b/include/linux/acpi_dma.h @@ -11,10 +11,11 @@ #ifndef __LINUX_ACPI_DMA_H #define __LINUX_ACPI_DMA_H -#include <linux/list.h> -#include <linux/device.h> #include <linux/err.h> #include <linux/dmaengine.h> +#include <linux/types.h> + +struct device; /** * struct acpi_dma_spec - slave device DMA resources @@ -65,7 +66,6 @@ int devm_acpi_dma_controller_register(struct device *dev, struct dma_chan *(*acpi_dma_xlate) (struct acpi_dma_spec *, struct acpi_dma *), void *data); -void devm_acpi_dma_controller_free(struct device *dev); struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, size_t index); @@ -94,9 +94,6 @@ static inline int devm_acpi_dma_controller_register(struct device *dev, { return -ENODEV; } -static inline void devm_acpi_dma_controller_free(struct device *dev) -{ -} static inline struct dma_chan *acpi_dma_request_slave_chan_by_index( struct device *dev, size_t index) diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h index 05e758b8b894..3ffa5341dce2 100644 --- a/include/linux/alarmtimer.h +++ b/include/linux/alarmtimer.h @@ -20,12 +20,6 @@ enum alarmtimer_type { ALARM_BOOTTIME_FREEZER, }; -enum alarmtimer_restart { - ALARMTIMER_NORESTART, - ALARMTIMER_RESTART, -}; - - #define ALARMTIMER_STATE_INACTIVE 0x00 #define ALARMTIMER_STATE_ENQUEUED 0x01 @@ -42,14 +36,14 @@ enum alarmtimer_restart { struct alarm { struct timerqueue_node node; struct hrtimer timer; - enum alarmtimer_restart (*function)(struct alarm *, ktime_t now); + void (*function)(struct alarm *, ktime_t now); enum alarmtimer_type type; int state; void *data; }; void alarm_init(struct alarm *alarm, enum alarmtimer_type type, - enum alarmtimer_restart (*function)(struct alarm *, ktime_t)); + 
void (*function)(struct alarm *, ktime_t)); void alarm_start(struct alarm *alarm, ktime_t start); void alarm_start_relative(struct alarm *alarm, ktime_t start); void alarm_restart(struct alarm *alarm); diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h index c4a0b23846d8..dcb1d37dabc2 100644 --- a/include/linux/alcor_pci.h +++ b/include/linux/alcor_pci.h @@ -11,6 +11,7 @@ #define ALCOR_SD_CARD 0 #define ALCOR_MS_CARD 1 +#define DRV_NAME_ALCOR_PCI "alcor_pci" #define DRV_NAME_ALCOR_PCI_SDMMC "alcor_sdmmc" #define DRV_NAME_ALCOR_PCI_MS "alcor_ms" diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 941deffc590d..7c0786bdf9af 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -30,6 +30,21 @@ struct alloc_tag { struct alloc_tag_counters __percpu *counters; } __aligned(8); +struct alloc_tag_kernel_section { + struct alloc_tag *first_tag; + unsigned long count; +}; + +struct alloc_tag_module_section { + union { + unsigned long start_addr; + struct alloc_tag *first_tag; + }; + unsigned long end_addr; + /* used size */ + unsigned long size; +}; + #ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG #define CODETAG_EMPTY ((void *)1) @@ -54,6 +69,8 @@ static inline void set_codetag_empty(union codetag_ref *ref) {} #ifdef CONFIG_MEM_ALLOC_PROFILING +#define ALLOC_TAG_SECTION_NAME "alloc_tags" + struct codetag_bytes { struct codetag *ct; s64 bytes; @@ -76,7 +93,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); #define DEFINE_ALLOC_TAG(_alloc_tag) \ static struct alloc_tag _alloc_tag __used __aligned(8) \ - __section("alloc_tags") = { \ + __section(ALLOC_TAG_SECTION_NAME) = { \ .ct = CODE_TAG_INIT, \ .counters = &_shared_alloc_tag }; @@ -85,7 +102,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); #define DEFINE_ALLOC_TAG(_alloc_tag) \ static DEFINE_PER_CPU(struct alloc_tag_counters, _alloc_tag_cntr); \ static struct alloc_tag _alloc_tag __used __aligned(8) \ - __section("alloc_tags") = { \ + __section(ALLOC_TAG_SECTION_NAME) = { \ .ct = CODE_TAG_INIT, \ .counters = &_alloc_tag_cntr }; diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index dda2f3ea89cb..9946276aff73 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -121,6 +121,7 @@ extern const struct bus_type amba_bustype; #ifdef CONFIG_ARM_AMBA int __amba_driver_register(struct amba_driver *, struct module *); void amba_driver_unregister(struct amba_driver *); +bool dev_is_amba(const struct device *dev); #else static inline int __amba_driver_register(struct amba_driver *drv, struct module *owner) @@ -130,6 +131,10 @@ static inline int __amba_driver_register(struct amba_driver *drv, static inline void amba_driver_unregister(struct amba_driver *drv) { } +static inline bool dev_is_amba(const struct device *dev) +{ + return false; +} #endif struct amba_device *amba_device_alloc(const char *, resource_size_t, size_t); diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 4a952c4885ed..2222e8b03ff4 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -45,6 +45,7 @@ enum scale_freq_source { SCALE_FREQ_SOURCE_CPUFREQ = 0, SCALE_FREQ_SOURCE_ARCH, SCALE_FREQ_SOURCE_CPPC, + SCALE_FREQ_SOURCE_VIRT, }; struct scale_freq_data { diff --git a/include/linux/asn1_decoder.h b/include/linux/asn1_decoder.h index 83f9c6e1e5e9..b41bce82a191 100644 --- a/include/linux/asn1_decoder.h +++ b/include/linux/asn1_decoder.h @@ -9,6 +9,7 @@ #define _LINUX_ASN1_DECODER_H #include <linux/asn1.h> +#include 
<linux/types.h> struct asn1_decoder; diff --git a/include/linux/asn1_encoder.h b/include/linux/asn1_encoder.h index 08cd0c2ad34f..d17484dffb74 100644 --- a/include/linux/asn1_encoder.h +++ b/include/linux/asn1_encoder.h @@ -6,7 +6,6 @@ #include <linux/types.h> #include <linux/asn1.h> #include <linux/asn1_ber_bytecode.h> -#include <linux/bug.h> #define asn1_oid_len(oid) (sizeof(oid)/sizeof(u32)) unsigned char * diff --git a/include/linux/ath9k_platform.h b/include/linux/ath9k_platform.h deleted file mode 100644 index 76860a461ed2..000000000000 --- a/include/linux/ath9k_platform.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (c) 2008 Atheros Communications Inc. - * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org> - * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#ifndef _LINUX_ATH9K_PLATFORM_H -#define _LINUX_ATH9K_PLATFORM_H - -#define ATH9K_PLAT_EEP_MAX_WORDS 2048 - -struct ath9k_platform_data { - const char *eeprom_name; - - u16 eeprom_data[ATH9K_PLAT_EEP_MAX_WORDS]; - u8 *macaddr; - - int led_pin; - u32 gpio_mask; - u32 gpio_val; - - u32 bt_active_pin; - u32 bt_priority_pin; - u32 wlan_active_pin; - - bool endian_check; - bool is_clk_25mhz; - bool tx_gain_buffalo; - bool disable_2ghz; - bool disable_5ghz; - bool led_active_high; - - int (*get_mac_revision)(void); - int (*external_reset)(void); - - bool use_eeprom; -}; - -#endif /* _LINUX_ATH9K_PLATFORM_H */ diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h index 31762324bcc9..65dd7f154374 100644 --- a/include/linux/auxiliary_bus.h +++ b/include/linux/auxiliary_bus.h @@ -269,8 +269,4 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv); #define module_auxiliary_driver(__auxiliary_driver) \ module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister) -struct auxiliary_device *auxiliary_find_device(struct device *start, - const void *data, - device_match_t match); - #endif /* _AUXILIARY_BUS_H_ */ diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index f41395264dca..13a11f3c09b8 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -89,6 +89,9 @@ enum virtchnl_rx_hsplit { VIRTCHNL_RX_HSPLIT_SPLIT_SCTP = 8, }; +enum virtchnl_bw_limit_type { + VIRTCHNL_BW_SHAPER = 0, +}; /* END GENERIC DEFINES */ /* Opcodes for VF-PF communication. 
These are placed in the v_opcode field @@ -151,6 +154,11 @@ enum virtchnl_ops { VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2 = 55, VIRTCHNL_OP_ENABLE_VLAN_INSERTION_V2 = 56, VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2 = 57, + /* opcode 57 - 65 are reserved */ + VIRTCHNL_OP_GET_QOS_CAPS = 66, + /* opcode 68 through 111 are reserved */ + VIRTCHNL_OP_CONFIG_QUEUE_BW = 112, + VIRTCHNL_OP_CONFIG_QUANTA = 113, VIRTCHNL_OP_MAX, }; @@ -261,6 +269,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); #define VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC BIT(26) #define VIRTCHNL_VF_OFFLOAD_ADV_RSS_PF BIT(27) #define VIRTCHNL_VF_OFFLOAD_FDIR_PF BIT(28) +#define VIRTCHNL_VF_OFFLOAD_QOS BIT(29) #define VF_BASE_MODE_OFFLOADS (VIRTCHNL_VF_OFFLOAD_L2 | \ VIRTCHNL_VF_OFFLOAD_VLAN | \ @@ -1416,6 +1425,86 @@ struct virtchnl_fdir_del { VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); +struct virtchnl_shaper_bw { + /* Unit is Kbps */ + u32 committed; + u32 peak; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_shaper_bw); + +/* VIRTCHNL_OP_GET_QOS_CAPS + * VF sends this message to get its QoS Caps, such as + * TC number, Arbiter and Bandwidth. + */ +struct virtchnl_qos_cap_elem { + u8 tc_num; + u8 tc_prio; +#define VIRTCHNL_ABITER_STRICT 0 +#define VIRTCHNL_ABITER_ETS 2 + u8 arbiter; +#define VIRTCHNL_STRICT_WEIGHT 1 + u8 weight; + enum virtchnl_bw_limit_type type; + union { + struct virtchnl_shaper_bw shaper; + u8 pad2[32]; + }; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(40, virtchnl_qos_cap_elem); + +struct virtchnl_qos_cap_list { + u16 vsi_id; + u16 num_elem; + struct virtchnl_qos_cap_elem cap[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_qos_cap_list); +#define virtchnl_qos_cap_list_LEGACY_SIZEOF 44 + +/* VIRTCHNL_OP_CONFIG_QUEUE_BW */ +struct virtchnl_queue_bw { + u16 queue_id; + u8 tc; + u8 pad; + struct virtchnl_shaper_bw shaper; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_queue_bw); + +struct virtchnl_queues_bw_cfg { + u16 vsi_id; + u16 num_queues; + struct virtchnl_queue_bw cfg[]; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(4, virtchnl_queues_bw_cfg); +#define virtchnl_queues_bw_cfg_LEGACY_SIZEOF 16 + +enum virtchnl_queue_type { + VIRTCHNL_QUEUE_TYPE_TX = 0, + VIRTCHNL_QUEUE_TYPE_RX = 1, +}; + +/* structure to specify a chunk of contiguous queues */ +struct virtchnl_queue_chunk { + /* see enum virtchnl_queue_type */ + s32 type; + u16 start_queue_id; + u16 num_queues; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(8, virtchnl_queue_chunk); + +struct virtchnl_quanta_cfg { + u16 quanta_size; + u16 pad; + struct virtchnl_queue_chunk queue_select; +}; + +VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_quanta_cfg); + #define __vss_byone(p, member, count, old) \ (struct_size(p, member, count) + (old - 1 - struct_size(p, member, 0))) @@ -1438,6 +1527,8 @@ VIRTCHNL_CHECK_STRUCT_LEN(12, virtchnl_fdir_del); __vss(virtchnl_vlan_filter_list_v2, __vss_byelem, p, m, c), \ __vss(virtchnl_tc_info, __vss_byelem, p, m, c), \ __vss(virtchnl_rdma_qvlist_info, __vss_byelem, p, m, c), \ + __vss(virtchnl_qos_cap_list, __vss_byelem, p, m, c), \ + __vss(virtchnl_queues_bw_cfg, __vss_byelem, p, m, c), \ __vss(virtchnl_rss_key, __vss_byone, p, m, c), \ __vss(virtchnl_rss_lut, __vss_byone, p, m, c)) @@ -1637,6 +1728,35 @@ virtchnl_vc_validate_vf_msg(struct virtchnl_version_info *ver, u32 v_opcode, case VIRTCHNL_OP_DISABLE_VLAN_INSERTION_V2: valid_len = sizeof(struct virtchnl_vlan_setting); break; + case VIRTCHNL_OP_GET_QOS_CAPS: + break; + case VIRTCHNL_OP_CONFIG_QUEUE_BW: + valid_len = virtchnl_queues_bw_cfg_LEGACY_SIZEOF; + if (msglen >= valid_len) { + struct virtchnl_queues_bw_cfg *q_bw = 
+ (struct virtchnl_queues_bw_cfg *)msg; + + valid_len = virtchnl_struct_size(q_bw, cfg, + q_bw->num_queues); + if (q_bw->num_queues == 0) { + err_msg_format = true; + break; + } + } + break; + case VIRTCHNL_OP_CONFIG_QUANTA: + valid_len = sizeof(struct virtchnl_quanta_cfg); + if (msglen >= valid_len) { + struct virtchnl_quanta_cfg *q_quanta = + (struct virtchnl_quanta_cfg *)msg; + + if (q_quanta->quanta_size == 0 || + q_quanta->queue_select.num_queues == 0) { + err_msg_format = true; + break; + } + } + break; /* These are always errors coming from the VF. */ case VIRTCHNL_OP_EVENT: case VIRTCHNL_OP_UNKNOWN: diff --git a/include/linux/backing-file.h b/include/linux/backing-file.h index 2eed0ffb5e8f..1476a6ed1bfd 100644 --- a/include/linux/backing-file.h +++ b/include/linux/backing-file.h @@ -14,9 +14,8 @@ struct backing_file_ctx { const struct cred *cred; - struct file *user_file; - void (*accessed)(struct file *); - void (*end_write)(struct file *, loff_t, ssize_t); + void (*accessed)(struct file *file); + void (*end_write)(struct kiocb *iocb, ssize_t); }; struct file *backing_file_open(const struct path *user_path, int flags, @@ -31,13 +30,13 @@ ssize_t backing_file_read_iter(struct file *file, struct iov_iter *iter, ssize_t backing_file_write_iter(struct file *file, struct iov_iter *iter, struct kiocb *iocb, int flags, struct backing_file_ctx *ctx); -ssize_t backing_file_splice_read(struct file *in, loff_t *ppos, +ssize_t backing_file_splice_read(struct file *in, struct kiocb *iocb, struct pipe_inode_info *pipe, size_t len, unsigned int flags, struct backing_file_ctx *ctx); ssize_t backing_file_splice_write(struct pipe_inode_info *pipe, - struct file *out, loff_t *ppos, size_t len, - unsigned int flags, + struct file *out, struct kiocb *iocb, + size_t len, unsigned int flags, struct backing_file_ctx *ctx); int backing_file_mmap(struct file *file, struct vm_area_struct *vma, struct backing_file_ctx *ctx); diff --git a/include/linux/backlight.h b/include/linux/backlight.h index ea9c1bc8148e..f5652e5a9060 100644 --- a/include/linux/backlight.h +++ b/include/linux/backlight.h @@ -66,24 +66,6 @@ enum backlight_type { BACKLIGHT_TYPE_MAX, }; -/** - * enum backlight_notification - the type of notification - * - * The notifications that is used for notification sent to the receiver - * that registered notifications using backlight_register_notifier(). - */ -enum backlight_notification { - /** - * @BACKLIGHT_REGISTERED: The backlight device is registered. - */ - BACKLIGHT_REGISTERED, - - /** - * @BACKLIGHT_UNREGISTERED: The backlight revice is unregistered. - */ - BACKLIGHT_UNREGISTERED, -}; - /** enum backlight_scale - the type of scale used for brightness values * * The type of scale used for brightness values. 
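[Editor's aside: with backlight_register_notifier()/backlight_unregister_notifier() removed just below, the remaining discovery interface in this header is direct lookup. A hedged sketch using the surviving API (the device name is hypothetical, and reference management is elided):

#include <linux/backlight.h>
#include <linux/errno.h>

/* Look a backlight device up by name and set a brightness level. */
static int example_set_backlight(void)
{
	struct backlight_device *bd;

	bd = backlight_device_get_by_name("edp_backlight");
	if (!bd)
		return -ENODEV;

	return backlight_device_set_brightness(bd, 42);
}
]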
@@ -421,8 +403,6 @@ void devm_backlight_device_unregister(struct device *dev, struct backlight_device *bd); void backlight_force_update(struct backlight_device *bd, enum backlight_update_reason reason); -int backlight_register_notifier(struct notifier_block *nb); -int backlight_unregister_notifier(struct notifier_block *nb); struct backlight_device *backlight_device_get_by_name(const char *name); struct backlight_device *backlight_device_get_by_type(enum backlight_type type); int backlight_device_set_brightness(struct backlight_device *bd, diff --git a/include/linux/bio-integrity.h b/include/linux/bio-integrity.h index dd831c269e99..dbf0f74c1529 100644 --- a/include/linux/bio-integrity.h +++ b/include/linux/bio-integrity.h @@ -72,7 +72,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, gfp_t gfp, unsigned int nr); int bio_integrity_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int offset); -int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len, u32 seed); +int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t len); void bio_integrity_unmap_user(struct bio *bio); bool bio_integrity_prep(struct bio *bio); void bio_integrity_advance(struct bio *bio, unsigned int bytes_done); @@ -99,7 +99,7 @@ static inline void bioset_integrity_free(struct bio_set *bs) } static inline int bio_integrity_map_user(struct bio *bio, void __user *ubuf, - ssize_t len, u32 seed) + ssize_t len) { return -EINVAL; } diff --git a/include/linux/bio.h b/include/linux/bio.h index faceadb040f9..60830a6a5939 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -418,8 +418,6 @@ bool __must_check bio_add_folio(struct bio *bio, struct folio *folio, size_t len, size_t off); extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, unsigned int, unsigned int); -int bio_add_zone_append_page(struct bio *bio, struct page *page, - unsigned int len, unsigned int offset); void __bio_add_page(struct bio *bio, struct page *page, unsigned int len, unsigned int off); void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, @@ -677,6 +675,23 @@ static inline void bio_clear_polled(struct bio *bio) bio->bi_opf &= ~REQ_POLLED; } +/** + * bio_is_zone_append - is this a zone append bio? + * @bio: bio to check + * + * Check if @bio is a zone append operation. Core block layer code and end_io + * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check + * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if + * it is not natively supported. 
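[Editor's aside: an end_io handler is the natural user of bio_is_zone_append(), whose body completes just below this note. A sketch under the documented block layer convention that a completed zone append reports the written sector in bi_iter.bi_sector, even when the append was emulated as a regular write:

#include <linux/bio.h>
#include <linux/printk.h>

/* Hypothetical completion handler for a zone-append submission. */
static void example_zone_append_end_io(struct bio *bio)
{
	if (bio_is_zone_append(bio) && !bio->bi_status) {
		sector_t written = bio->bi_iter.bi_sector;

		pr_debug("data landed at sector %llu\n",
			 (unsigned long long)written);
	}
	bio_put(bio);
}
]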
+ */ +static inline bool bio_is_zone_append(struct bio *bio) +{ + if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED)) + return false; + return bio_op(bio) == REQ_OP_ZONE_APPEND || + bio_flagged(bio, BIO_EMULATES_ZONE_APPEND); +} + struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, unsigned int nr_pages, blk_opf_t opf, gfp_t gfp); struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new); diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index 676f8f860c47..c7eae0bfb013 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -28,7 +28,7 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t, int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, - ssize_t bytes, u32 seed); + ssize_t bytes); static inline bool blk_integrity_queue_supports_integrity(struct request_queue *q) @@ -104,8 +104,7 @@ static inline int blk_rq_map_integrity_sg(struct request *q, } static inline int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, - ssize_t bytes, - u32 seed) + ssize_t bytes) { return -EINVAL; } diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 4fecf46ef681..c596e0e4cb75 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -156,9 +156,6 @@ struct request { struct blk_crypto_keyslot *crypt_keyslot; #endif - enum rw_hint write_hint; - unsigned short ioprio; - enum mq_rq_state state; atomic_t ref; @@ -222,7 +219,9 @@ static inline bool blk_rq_is_passthrough(struct request *rq) static inline unsigned short req_get_ioprio(struct request *req) { - return req->ioprio; + if (req->bio) + return req->bio->bi_ioprio; + return 0; } #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) @@ -230,62 +229,61 @@ static inline unsigned short req_get_ioprio(struct request *req) #define rq_dma_dir(rq) \ (op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE) -#define rq_list_add(listptr, rq) do { \ - (rq)->rq_next = *(listptr); \ - *(listptr) = rq; \ -} while (0) - -#define rq_list_add_tail(lastpptr, rq) do { \ - (rq)->rq_next = NULL; \ - **(lastpptr) = rq; \ - *(lastpptr) = &rq->rq_next; \ -} while (0) - -#define rq_list_pop(listptr) \ -({ \ - struct request *__req = NULL; \ - if ((listptr) && *(listptr)) { \ - __req = *(listptr); \ - *(listptr) = __req->rq_next; \ - } \ - __req; \ -}) +static inline int rq_list_empty(const struct rq_list *rl) +{ + return rl->head == NULL; +} -#define rq_list_peek(listptr) \ -({ \ - struct request *__req = NULL; \ - if ((listptr) && *(listptr)) \ - __req = *(listptr); \ - __req; \ -}) +static inline void rq_list_init(struct rq_list *rl) +{ + rl->head = NULL; + rl->tail = NULL; +} -#define rq_list_for_each(listptr, pos) \ - for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos)) +static inline void rq_list_add_tail(struct rq_list *rl, struct request *rq) +{ + rq->rq_next = NULL; + if (rl->tail) + rl->tail->rq_next = rq; + else + rl->head = rq; + rl->tail = rq; +} -#define rq_list_for_each_safe(listptr, pos, nxt) \ - for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \ - pos; pos = nxt, nxt = pos ? 
rq_list_next(pos) : NULL) +static inline void rq_list_add_head(struct rq_list *rl, struct request *rq) +{ + rq->rq_next = rl->head; + rl->head = rq; + if (!rl->tail) + rl->tail = rq; +} -#define rq_list_next(rq) (rq)->rq_next -#define rq_list_empty(list) ((list) == (struct request *) NULL) +static inline struct request *rq_list_pop(struct rq_list *rl) +{ + struct request *rq = rl->head; -/** - * rq_list_move() - move a struct request from one list to another - * @src: The source list @rq is currently in - * @dst: The destination list that @rq will be appended to - * @rq: The request to move - * @prev: The request preceding @rq in @src (NULL if @rq is the head) - */ -static inline void rq_list_move(struct request **src, struct request **dst, - struct request *rq, struct request *prev) + if (rq) { + rl->head = rl->head->rq_next; + if (!rl->head) + rl->tail = NULL; + rq->rq_next = NULL; + } + + return rq; +} + +static inline struct request *rq_list_peek(struct rq_list *rl) { - if (prev) - prev->rq_next = rq->rq_next; - else - *src = rq->rq_next; - rq_list_add(dst, rq); + return rl->head; } +#define rq_list_for_each(rl, pos) \ + for (pos = rq_list_peek((rl)); (pos); pos = pos->rq_next) + +#define rq_list_for_each_safe(rl, pos, nxt) \ + for (pos = rq_list_peek((rl)), nxt = pos->rq_next; \ + pos; pos = nxt, nxt = pos ? pos->rq_next : NULL) + /** * enum blk_eh_timer_return - How the timeout handler should proceed * @BLK_EH_DONE: The block driver completed the command or will complete it at @@ -577,7 +575,7 @@ struct blk_mq_ops { * empty the @rqlist completely, then the rest will be queued * individually by the block layer upon return. */ - void (*queue_rqs)(struct request **rqlist); + void (*queue_rqs)(struct rq_list *rqlist); /** * @get_budget: Reserve budget before queue request, once .queue_rq is @@ -857,12 +855,6 @@ void blk_mq_end_request_batch(struct io_comp_batch *ib); */ static inline bool blk_mq_need_time_stamp(struct request *rq) { - /* - * passthrough io doesn't use iostat accounting, cgroup stats - * and io scheduler functionalities. 
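[Editor's aside: putting the new list type together, here is a sketch of a driver ->queue_rqs() written against the rq_list helpers above, with a hypothetical submit_one() standing in for the hardware-specific submission. Anything the hardware cannot take is handed back in @rqlist, matching the documented contract that leftovers are then queued individually by the block layer:

/* Drain the plugged batch; return what the hardware queue rejects. */
static void example_queue_rqs(struct rq_list *rqlist)
{
	struct rq_list requeue = {};
	struct request *rq;

	while ((rq = rq_list_pop(rqlist))) {
		if (!submit_one(rq))	/* hypothetical helper */
			rq_list_add_tail(&requeue, rq);
	}
	*rqlist = requeue;
}
]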
- */ - if (blk_rq_is_passthrough(rq)) - return false; return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS | RQF_USE_SCHED)); } @@ -892,7 +884,7 @@ static inline bool blk_mq_add_to_batch(struct request *req, else if (iob->complete != complete) return false; iob->need_ts |= blk_mq_need_time_stamp(req); - rq_list_add(&iob->req_list, req); + rq_list_add_tail(&iob->req_list, req); return true; } @@ -925,6 +917,8 @@ void blk_freeze_queue_start(struct request_queue *q); void blk_mq_freeze_queue_wait(struct request_queue *q); int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, unsigned long timeout); +void blk_mq_unfreeze_queue_non_owner(struct request_queue *q); +void blk_freeze_queue_start_non_owner(struct request_queue *q); void blk_mq_map_queues(struct blk_mq_queue_map *qmap); void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues); @@ -989,7 +983,6 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, rq->nr_phys_segments = nr_segs; rq->__data_len = bio->bi_iter.bi_size; rq->bio = rq->biotail = bio; - rq->ioprio = bio_prio(bio); } void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx, diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 50c3b959da28..08a727b40816 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -25,6 +25,7 @@ #include <linux/uuid.h> #include <linux/xarray.h> #include <linux/file.h> +#include <linux/lockdep.h> struct module; struct request_queue; @@ -194,7 +195,7 @@ struct gendisk { unsigned int nr_zones; unsigned int zone_capacity; unsigned int last_zone_capacity; - unsigned long *conv_zones_bitmap; + unsigned long __rcu *conv_zones_bitmap; unsigned int zone_wplugs_hash_bits; spinlock_t zone_wplugs_lock; struct mempool_s *zone_wplugs_pool; @@ -332,6 +333,10 @@ typedef unsigned int __bitwise blk_features_t; #define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \ ((__force blk_features_t)(1u << 15)) +/* stacked device can/does support atomic writes */ +#define BLK_FEAT_ATOMIC_WRITES_STACKED \ + ((__force blk_features_t)(1u << 16)) + /* * Flags automatically inherited when stacking limits. 
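[Editor's aside: the feature word lives in queue_limits, which blk-mq drivers populate when allocating their disk. A hedged sketch (the driver specifics are hypothetical; note that BLK_FEAT_ATOMIC_WRITES_STACKED above is aimed at stacking drivers such as md/dm, not a leaf driver like this one):

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Declare write cache and FUA support up front at disk creation. */
static struct gendisk *example_alloc_disk(struct blk_mq_tag_set *set)
{
	struct queue_limits lim = {
		.features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA,
	};

	return blk_mq_alloc_disk(set, &lim, NULL);
}
]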
*/ @@ -349,6 +354,9 @@ typedef unsigned int __bitwise blk_flags_t; /* I/O topology is misaligned */ #define BLK_FLAG_MISALIGNED ((__force blk_flags_t)(1u << 1)) +/* passthrough command IO accounting */ +#define BLK_FLAG_IOSTATS_PASSTHROUGH ((__force blk_flags_t)(1u << 2)) + struct queue_limits { blk_features_t features; blk_flags_t flags; @@ -371,6 +379,7 @@ struct queue_limits { unsigned int max_user_discard_sectors; unsigned int max_secure_erase_sectors; unsigned int max_write_zeroes_sectors; + unsigned int max_hw_zone_append_sectors; unsigned int max_zone_append_sectors; unsigned int discard_granularity; unsigned int discard_alignment; @@ -471,6 +480,11 @@ struct request_queue { struct xarray hctx_table; struct percpu_ref q_usage_counter; + struct lock_class_key io_lock_cls_key; + struct lockdep_map io_lockdep_map; + + struct lock_class_key q_lock_cls_key; + struct lockdep_map q_lockdep_map; struct request *last_merge; @@ -566,6 +580,10 @@ struct request_queue { struct throtl_data *td; #endif struct rcu_head rcu_head; +#ifdef CONFIG_LOCKDEP + struct task_struct *mq_freeze_owner; + int mq_freeze_owner_depth; +#endif wait_queue_head_t mq_freeze_wq; /* * Protect concurrent access to q_usage_counter by @@ -617,6 +635,8 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q); test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) #define blk_queue_nonrot(q) (!((q)->limits.features & BLK_FEAT_ROTATIONAL)) #define blk_queue_io_stat(q) ((q)->limits.features & BLK_FEAT_IO_STAT) +#define blk_queue_passthrough_stat(q) \ + ((q)->limits.flags & BLK_FLAG_IOSTATS_PASSTHROUGH) #define blk_queue_dax(q) ((q)->limits.features & BLK_FEAT_DAX) #define blk_queue_pci_p2pdma(q) ((q)->limits.features & BLK_FEAT_PCI_P2PDMA) #ifdef CONFIG_BLK_RQ_ALLOC_TIME @@ -725,6 +745,9 @@ static inline unsigned int blk_queue_depth(struct request_queue *q) #define for_each_bio(_bio) \ for (; _bio; _bio = _bio->bi_next) +int __must_check add_disk_fwnode(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups, + struct fwnode_handle *fwnode); int __must_check device_add_disk(struct device *parent, struct gendisk *disk, const struct attribute_group **groups); static inline int __must_check add_disk(struct gendisk *disk) @@ -756,13 +779,13 @@ static inline void bdev_clear_flag(struct block_device *bdev, unsigned flag) atomic_andnot(flag, &bdev->__bd_flags); } -static inline int get_disk_ro(struct gendisk *disk) +static inline bool get_disk_ro(struct gendisk *disk) { return bdev_test_flag(disk->part0, BD_READ_ONLY) || test_bit(GD_READ_ONLY, &disk->state); } -static inline int bdev_read_only(struct block_device *bdev) +static inline bool bdev_read_only(struct block_device *bdev) { return bdev_test_flag(bdev, BD_READ_ONLY) || get_disk_ro(bdev->bd_disk); } @@ -929,6 +952,7 @@ queue_limits_start_update(struct request_queue *q) int queue_limits_commit_update(struct request_queue *q, struct queue_limits *lim); int queue_limits_set(struct request_queue *q, struct queue_limits *lim); +int blk_validate_limits(struct queue_limits *lim); /** * queue_limits_cancel_update - cancel an atomic update of queue limits @@ -986,6 +1010,11 @@ extern void blk_put_queue(struct request_queue *); void blk_mark_disk_dead(struct gendisk *disk); +struct rq_list { + struct request *head; + struct request *tail; +}; + #ifdef CONFIG_BLOCK /* * blk_plug permits building a queue of related requests by holding the I/O @@ -999,10 +1028,10 @@ void blk_mark_disk_dead(struct gendisk *disk); * blk_flush_plug() is called. 
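[Editor's aside: the plug described here, which with this series batches requests on the struct rq_list shown just below, is still driven by the classic start/finish pattern. A short sketch:

#include <linux/blkdev.h>

/* Batch a set of bios under one plug so blk-mq can merge and queue
 * them together; requests accumulate on the plug's rq_list. */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);
	for (i = 0; i < nr; i++)
		submit_bio(bios[i]);
	blk_finish_plug(&plug);
}
]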
*/ struct blk_plug { - struct request *mq_list; /* blk-mq requests */ + struct rq_list mq_list; /* blk-mq requests */ /* if ios_left is > 1, we can batch tag/rq allocations */ - struct request *cached_rq; + struct rq_list cached_rqs; u64 cur_ktime; unsigned short nr_ios; @@ -1145,6 +1174,11 @@ enum blk_default_limits { */ #define BLK_DEF_MAX_SECTORS_CAP 2560u +static inline struct queue_limits *bdev_limits(struct block_device *bdev) +{ + return &bdev_get_queue(bdev)->limits; +} + static inline unsigned long queue_segment_boundary(const struct request_queue *q) { return q->limits.seg_boundary_mask; @@ -1185,25 +1219,9 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } -static inline unsigned int -queue_limits_max_zone_append_sectors(const struct queue_limits *l) -{ - unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors); - - return min_not_zero(l->max_zone_append_sectors, max_sectors); -} - -static inline unsigned int queue_max_zone_append_sectors(struct request_queue *q) -{ - if (!blk_queue_is_zoned(q)) - return 0; - - return queue_limits_max_zone_append_sectors(&q->limits); -} - static inline bool queue_emulates_zone_append(struct request_queue *q) { - return blk_queue_is_zoned(q) && !q->limits.max_zone_append_sectors; + return blk_queue_is_zoned(q) && !q->limits.max_hw_zone_append_sectors; } static inline bool bdev_emulates_zone_append(struct block_device *bdev) @@ -1214,7 +1232,7 @@ static inline bool bdev_emulates_zone_append(struct block_device *bdev) static inline unsigned int bdev_max_zone_append_sectors(struct block_device *bdev) { - return queue_max_zone_append_sectors(bdev_get_queue(bdev)); + return bdev_limits(bdev)->max_zone_append_sectors; } static inline unsigned int bdev_max_segments(struct block_device *bdev) @@ -1247,7 +1265,7 @@ static inline unsigned int queue_io_min(const struct request_queue *q) return q->limits.io_min; } -static inline int bdev_io_min(struct block_device *bdev) +static inline unsigned int bdev_io_min(struct block_device *bdev) { return queue_io_min(bdev_get_queue(bdev)); } @@ -1257,7 +1275,7 @@ static inline unsigned int queue_io_opt(const struct request_queue *q) return q->limits.io_opt; } -static inline int bdev_io_opt(struct block_device *bdev) +static inline unsigned int bdev_io_opt(struct block_device *bdev) { return queue_io_opt(bdev_get_queue(bdev)); } @@ -1279,23 +1297,23 @@ unsigned int bdev_discard_alignment(struct block_device *bdev); static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.max_discard_sectors; + return bdev_limits(bdev)->max_discard_sectors; } static inline unsigned int bdev_discard_granularity(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.discard_granularity; + return bdev_limits(bdev)->discard_granularity; } static inline unsigned int bdev_max_secure_erase_sectors(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.max_secure_erase_sectors; + return bdev_limits(bdev)->max_secure_erase_sectors; } static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.max_write_zeroes_sectors; + return bdev_limits(bdev)->max_write_zeroes_sectors; } static inline bool bdev_nonrot(struct block_device *bdev) @@ -1331,7 +1349,7 @@ static inline bool bdev_write_cache(struct block_device *bdev) static inline bool bdev_fua(struct block_device *bdev) { - return bdev_get_queue(bdev)->limits.features & 
BLK_FEAT_FUA; + return bdev_limits(bdev)->features & BLK_FEAT_FUA; } static inline bool bdev_nowait(struct block_device *bdev) @@ -1376,7 +1394,34 @@ static inline bool bdev_is_zone_start(struct block_device *bdev, return bdev_offset_from_zone_start(bdev, sector) == 0; } -static inline int queue_dma_alignment(const struct request_queue *q) +/** + * bdev_zone_is_seq - check if a sector belongs to a sequential write zone + * @bdev: block device to check + * @sector: sector number + * + * Check if @sector on @bdev is contained in a sequential write required zone. + */ +static inline bool bdev_zone_is_seq(struct block_device *bdev, sector_t sector) +{ + bool is_seq = false; + +#if IS_ENABLED(CONFIG_BLK_DEV_ZONED) + if (bdev_is_zoned(bdev)) { + struct gendisk *disk = bdev->bd_disk; + unsigned long *bitmap; + + rcu_read_lock(); + bitmap = rcu_dereference(disk->conv_zones_bitmap); + is_seq = !bitmap || + !test_bit(disk_zone_no(disk, sector), bitmap); + rcu_read_unlock(); + } +#endif + + return is_seq; +} + +static inline unsigned int queue_dma_alignment(const struct request_queue *q) { return q->limits.dma_alignment; } @@ -1417,12 +1462,13 @@ static inline bool bdev_iter_is_aligned(struct block_device *bdev, bdev_logical_block_size(bdev) - 1); } -static inline int blk_lim_dma_alignment_and_pad(struct queue_limits *lim) +static inline unsigned int +blk_lim_dma_alignment_and_pad(struct queue_limits *lim) { return lim->dma_alignment | lim->dma_pad_mask; } -static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, +static inline bool blk_rq_aligned(struct request_queue *q, unsigned long addr, unsigned int len) { unsigned int alignment = blk_lim_dma_alignment_and_pad(&q->limits); @@ -1540,7 +1586,6 @@ static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time) return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev); } -int bdev_read_only(struct block_device *bdev); int set_blocksize(struct file *file, int size); int lookup_bdev(const char *pathname, dev_t *dev); @@ -1648,7 +1693,7 @@ int bdev_thaw(struct block_device *bdev); void bdev_fput(struct file *bdev_file); struct io_comp_batch { - struct request *req_list; + struct rq_list req_list; bool need_ts; void (*complete)(struct io_comp_batch *); }; @@ -1674,6 +1719,22 @@ static inline bool bdev_can_atomic_write(struct block_device *bdev) return true; } +static inline unsigned int +bdev_atomic_write_unit_min_bytes(struct block_device *bdev) +{ + if (!bdev_can_atomic_write(bdev)) + return 0; + return queue_atomic_write_unit_min_bytes(bdev_get_queue(bdev)); +} + +static inline unsigned int +bdev_atomic_write_unit_max_bytes(struct block_device *bdev) +{ + if (!bdev_can_atomic_write(bdev)) + return 0; + return queue_atomic_write_unit_max_bytes(bdev_get_queue(bdev)); +} + #define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { } #endif /* _LINUX_BLKDEV_H */ diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h index cffa38a73618..d8a8d245824a 100644 --- a/include/linux/bootmem_info.h +++ b/include/linux/bootmem_info.h @@ -6,11 +6,10 @@ #include <linux/kmemleak.h> /* - * Types for free bootmem stored in page->lru.next. These have to be in - * some random range in unsigned long space for debugging purposes. + * Types for free bootmem stored in the low bits of page->private. 
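The accessors that follow imply the packing convention private = (info << 4) | type; as an illustration only (the in-tree writer is get_page_bootmem(), not this sketch):

/* Illustrative inverse of the bootmem_type()/bootmem_info() readers below. */
static inline void example_pack_bootmem(struct page *page,
					unsigned long info,
					enum bootmem_type type)
{
	set_page_private(page, (info << 4) | type);
}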
*/ -enum { - MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 12, +enum bootmem_type { + MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE = 1, SECTION_INFO = MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE, MIX_SECTION_INFO, NODE_INFO, @@ -21,9 +20,19 @@ enum { void __init register_page_bootmem_info_node(struct pglist_data *pgdat); void get_page_bootmem(unsigned long info, struct page *page, - unsigned long type); + enum bootmem_type type); void put_page_bootmem(struct page *page); +static inline enum bootmem_type bootmem_type(const struct page *page) +{ + return (unsigned long)page->private & 0xf; +} + +static inline unsigned long bootmem_info(const struct page *page) +{ + return (unsigned long)page->private >> 4; +} + /* * Any memory allocated via the memblock allocator and not via the * buddy will be marked reserved already in the memmap. For those @@ -31,7 +40,7 @@ void put_page_bootmem(struct page *page); */ static inline void free_bootmem_page(struct page *page) { - unsigned long magic = page->index; + enum bootmem_type type = bootmem_type(page); /* * The reserve_bootmem_region sets the reserved flag on bootmem @@ -39,7 +48,7 @@ static inline void free_bootmem_page(struct page *page) */ VM_BUG_ON_PAGE(page_ref_count(page) != 2, page); - if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) + if (type == SECTION_INFO || type == MIX_SECTION_INFO) put_page_bootmem(page); else VM_BUG_ON_PAGE(1, page); @@ -53,8 +62,18 @@ static inline void put_page_bootmem(struct page *page) { } +static inline enum bootmem_type bootmem_type(const struct page *page) +{ + return SECTION_INFO; +} + +static inline unsigned long bootmem_info(const struct page *page) +{ + return 0; +} + static inline void get_page_bootmem(unsigned long info, struct page *page, - unsigned long type) + enum bootmem_type type) { } diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index ce91d9b2acb9..7fc69083e745 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -138,7 +138,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, short access, enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, - struct ctl_table *table, int write, + const struct ctl_table *table, int write, char **buf, size_t *pcount, loff_t *ppos, enum cgroup_bpf_attach_type atype); @@ -209,7 +209,7 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk, int __ret = 0; \ if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ - if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) && \ + if (__sk && __sk == skb_to_full_sk(skb) && \ cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ CGROUP_INET_EGRESS); \ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index bdadb0bb6cec..eaee2a819f4c 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -203,6 +203,7 @@ enum btf_field_type { BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD, BPF_REFCOUNT = (1 << 9), BPF_WORKQUEUE = (1 << 10), + BPF_UPTR = (1 << 11), }; typedef void (*btf_dtor_kfunc_t)(void *); @@ -322,6 +323,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type) return "kptr"; case BPF_KPTR_PERCPU: return "percpu_kptr"; + case BPF_UPTR: + return "uptr"; case BPF_LIST_HEAD: return "bpf_list_head"; case BPF_LIST_NODE: @@ -350,6 +353,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: return sizeof(u64); case BPF_LIST_HEAD: 
return sizeof(struct bpf_list_head); @@ -379,6 +383,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: return __alignof__(u64); case BPF_LIST_HEAD: return __alignof__(struct bpf_list_head); @@ -419,6 +424,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) case BPF_KPTR_UNREF: case BPF_KPTR_REF: case BPF_KPTR_PERCPU: + case BPF_UPTR: break; default: WARN_ON_ONCE(1); @@ -507,6 +513,25 @@ static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src bpf_obj_memcpy(map->record, dst, src, map->value_size, true); } +static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src) +{ + unsigned long *src_uptr, *dst_uptr; + const struct btf_field *field; + int i; + + if (!btf_record_has_field(rec, BPF_UPTR)) + return; + + for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { + if (field->type != BPF_UPTR) + continue; + + src_uptr = src + field->offset; + dst_uptr = dst + field->offset; + swap(*src_uptr, *dst_uptr); + } +} + static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size) { u32 curr_off = 0; @@ -907,10 +932,6 @@ enum bpf_reg_type { * additional context, assume the value is non-null. */ PTR_TO_BTF_ID, - /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not - * been checked for null. Used primarily to inform the verifier - * an explicit null check is required for this struct. - */ PTR_TO_MEM, /* reg points to valid memory region */ PTR_TO_ARENA, PTR_TO_BUF, /* reg points to a read/write buffer */ @@ -923,6 +944,10 @@ enum bpf_reg_type { PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, + /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not + * been checked for null. Used primarily to inform the verifier + * an explicit null check is required for this struct. + */ PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, /* This must be the last entry. Its purpose is to ensure the enum is @@ -1300,8 +1325,12 @@ void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); #ifdef CONFIG_BPF_JIT -int bpf_trampoline_link_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); -int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, struct bpf_trampoline *tr); +int bpf_trampoline_link_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog); +int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog); struct bpf_trampoline *bpf_trampoline_get(u64 key, struct bpf_attach_target_info *tgt_info); void bpf_trampoline_put(struct bpf_trampoline *tr); @@ -1373,7 +1402,8 @@ int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_func void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, struct bpf_prog *to); /* Called only from JIT-enabled code, so there's no need for stubs. 
*/ -void bpf_image_ksym_add(void *data, unsigned int size, struct bpf_ksym *ksym); +void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym); +void bpf_image_ksym_add(struct bpf_ksym *ksym); void bpf_image_ksym_del(struct bpf_ksym *ksym); void bpf_ksym_add(struct bpf_ksym *ksym); void bpf_ksym_del(struct bpf_ksym *ksym); @@ -1382,12 +1412,14 @@ void bpf_jit_uncharge_modmem(u32 size); bool bpf_prog_has_trampoline(const struct bpf_prog *prog); #else static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, - struct bpf_trampoline *tr) + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { return -ENOTSUPP; } static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, - struct bpf_trampoline *tr) + struct bpf_trampoline *tr, + struct bpf_prog *tgt_prog) { return -ENOTSUPP; } @@ -1476,6 +1508,7 @@ struct bpf_prog_aux { u32 max_rdwr_access; struct btf *attach_btf; const struct bpf_ctx_arg_aux *ctx_arg_info; + void __percpu *priv_stack_ptr; struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ struct bpf_prog *dst_prog; struct bpf_trampoline *dst_trampoline; @@ -1491,7 +1524,13 @@ struct bpf_prog_aux { bool xdp_has_frags; bool exception_cb; bool exception_boundary; + bool is_extended; /* true if extended by freplace program */ + bool jits_use_priv_stack; + bool priv_stack_requested; + u64 prog_array_member_cnt; /* counts how many times as member of prog_array */ + struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */ struct bpf_arena *arena; + void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */ /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ const struct btf_type *attach_func_proto; /* function name for valid attach_btf_id */ @@ -1606,6 +1645,11 @@ struct bpf_link { enum bpf_link_type type; const struct bpf_link_ops *ops; struct bpf_prog *prog; + /* whether BPF link itself has "sleepable" semantics, which can differ + * from underlying BPF program having a "sleepable" semantics, as BPF + * link's semantics is determined by target attach hook + */ + bool sleepable; /* rcu is used before freeing, work can be used to schedule that * RCU-based freeing before that, so they never overlap */ @@ -1622,8 +1666,10 @@ struct bpf_link_ops { */ void (*dealloc)(struct bpf_link *link); /* deallocate link resources callback, called after RCU grace period; - * if underlying BPF program is sleepable we go through tasks trace - * RCU GP and then "classic" RCU GP + * if either the underlying BPF program is sleepable or BPF link's + * target hook is sleepable, we'll go through tasks trace RCU GP and + * then "classic" RCU GP; this need for chaining tasks trace and + * classic RCU GPs is designated by setting bpf_link->sleepable flag */ void (*dealloc_deferred)(struct bpf_link *link); int (*detach)(struct bpf_link *link); @@ -2370,6 +2416,9 @@ int bpf_prog_new_fd(struct bpf_prog *prog); void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, const struct bpf_link_ops *ops, struct bpf_prog *prog); +void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, struct bpf_prog *prog, + bool sleepable); int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); int bpf_link_settle(struct bpf_link_primer *primer); void bpf_link_cleanup(struct bpf_link_primer *primer); @@ -2725,6 +2774,12 @@ static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, { } +static inline void 
bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, + const struct bpf_link_ops *ops, struct bpf_prog *prog, + bool sleepable) +{ +} + static inline int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) { @@ -3461,4 +3516,10 @@ static inline bool bpf_is_subprog(const struct bpf_prog *prog) return prog->aux->func_idx != 0; } +static inline bool bpf_prog_is_raw_tp(const struct bpf_prog *prog) +{ + return prog->type == BPF_PROG_TYPE_TRACING && + prog->expected_attach_type == BPF_TRACE_RAW_TP; +} + #endif /* _LINUX_BPF_H */ diff --git a/include/linux/bpf_local_storage.h b/include/linux/bpf_local_storage.h index dcddb0aef7d8..ab7244d8108f 100644 --- a/include/linux/bpf_local_storage.h +++ b/include/linux/bpf_local_storage.h @@ -77,7 +77,13 @@ struct bpf_local_storage_elem { struct hlist_node map_node; /* Linked to bpf_local_storage_map */ struct hlist_node snode; /* Linked to bpf_local_storage */ struct bpf_local_storage __rcu *local_storage; - struct rcu_head rcu; + union { + struct rcu_head rcu; + struct hlist_node free_node; /* used to postpone + * bpf_selem_free + * after raw_spin_unlock + */ + }; /* 8 bytes hole */ /* The data is stored in another cacheline to minimize * the number of cachelines access during a cache hit. @@ -181,7 +187,7 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap, struct bpf_local_storage_elem * bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value, - bool charge_mem, gfp_t gfp_flags); + bool charge_mem, bool swap_uptrs, gfp_t gfp_flags); void bpf_selem_free(struct bpf_local_storage_elem *selem, struct bpf_local_storage_map *smap, @@ -195,7 +201,7 @@ bpf_local_storage_alloc(void *owner, struct bpf_local_storage_data * bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap, - void *value, u64 map_flags, gfp_t gfp_flags); + void *value, u64 map_flags, bool swap_uptrs, gfp_t gfp_flags); u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map); diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 4513372c5bc8..f4290c179bee 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -48,22 +48,6 @@ enum bpf_reg_liveness { REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */ }; -/* For every reg representing a map value or allocated object pointer, - * we consider the tuple of (ptr, id) for them to be unique in verifier - * context and conside them to not alias each other for the purposes of - * tracking lock state. - */ -struct bpf_active_lock { - /* This can either be reg->map_ptr or reg->btf. If ptr is NULL, - * there's no active lock held, and other fields have no - * meaning. If non-NULL, it indicates that a lock is held and - * id member has the reg->id of the register which can be >= 0. - */ - void *ptr; - /* This will be reg->id */ - u32 id; -}; - #define ITER_PREFIX "bpf_iter_" enum bpf_iter_state { @@ -266,6 +250,13 @@ struct bpf_stack_state { }; struct bpf_reference_state { + /* Each reference object has a type. Ensure REF_TYPE_PTR is zero to + * default to pointer reference on zero initialization of a state. + */ + enum ref_state_type { + REF_TYPE_PTR = 0, + REF_TYPE_LOCK, + } type; /* Track each reference created with a unique id, even if the same * instruction creates the reference multiple times (eg, via CALL). */ @@ -274,17 +265,10 @@ struct bpf_reference_state { * is used purely to inform the user of a reference leak. 
*/ int insn_idx; - /* There can be a case like: - * main (frame 0) - * cb (frame 1) - * func (frame 3) - * cb (frame 4) - * Hence for frame 4, if callback_ref just stored boolean, it would be - * impossible to distinguish nested callback refs. Hence store the - * frameno and compare that to callback_ref in check_reference_leak when - * exiting a callback function. - */ - int callback_ref; + /* Used to keep track of the source object of a lock, to ensure + * it matches on unlock. + */ + void *ptr; }; struct bpf_retval_range { @@ -332,6 +316,7 @@ struct bpf_func_state { /* The following fields should be last. See copy_func_state() */ int acquired_refs; + int active_locks; struct bpf_reference_state *refs; /* The state of the stack. Each element of the array describes BPF_REG_SIZE * (i.e. 8) bytes worth of stack memory. */ @@ -349,7 +334,7 @@ struct bpf_func_state { #define MAX_CALL_FRAMES 8 -/* instruction history flags, used in bpf_jmp_history_entry.flags field */ +/* instruction history flags, used in bpf_insn_hist_entry.flags field */ enum { /* instruction references stack slot through PTR_TO_STACK register; * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8) @@ -367,7 +352,7 @@ enum { static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES); static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8); -struct bpf_jmp_history_entry { +struct bpf_insn_hist_entry { u32 idx; /* insn idx can't be bigger than 1 million */ u32 prev_idx : 22; @@ -434,7 +419,6 @@ struct bpf_verifier_state { u32 insn_idx; u32 curframe; - struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; u32 active_preempt_lock; @@ -458,13 +442,14 @@ struct bpf_verifier_state { * See get_loop_entry() for more information. */ struct bpf_verifier_state *loop_entry; - /* jmp history recorded from first to last. - * backtracking is using it to go from last to first. - * For most states jmp_history_cnt is [0-3]. + /* Sub-range of env->insn_hist[] corresponding to this state's + * instruction history. + * Backtracking uses it to go from last to first. + * For most states the instruction history is short, 0-3 instructions. * For loops can go up to ~40.
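Since each state now records only a sub-range of the shared history array, a walk over one state's entries looks roughly like this (illustrative sketch; it assumes insn_hist_end is exclusive, as the start/end naming suggests):

/* Sketch: visit one state's history slice newest-first, as backtracking does. */
static void example_walk_insn_hist(struct bpf_verifier_env *env,
				   struct bpf_verifier_state *st)
{
	u32 i;

	for (i = st->insn_hist_end; i > st->insn_hist_start; i--)
		pr_debug("history: insn idx %u\n", env->insn_hist[i - 1].idx);
}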
*/ - struct bpf_jmp_history_entry *jmp_history; - u32 jmp_history_cnt; + u32 insn_hist_start; + u32 insn_hist_end; u32 dfs_depth; u32 callback_unroll_depth; u32 may_goto_depth; @@ -649,6 +634,12 @@ struct bpf_subprog_arg_info { }; }; +enum priv_stack_mode { + PRIV_STACK_UNKNOWN, + NO_PRIV_STACK, + PRIV_STACK_ADAPTIVE, +}; + struct bpf_subprog_info { /* 'start' has to be the first field otherwise find_subprog() won't work */ u32 start; /* insn idx of function entry point */ @@ -669,6 +660,7 @@ struct bpf_subprog_info { /* true if bpf_fastcall stack region is used by functions that can't be inlined */ bool keep_fastcall_stack: 1; + enum priv_stack_mode priv_stack_mode; u8 arg_cnt; struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS]; }; @@ -747,7 +739,9 @@ struct bpf_verifier_env { int cur_stack; } cfg; struct backtrack_state bt; - struct bpf_jmp_history_entry *cur_hist_ent; + struct bpf_insn_hist_entry *insn_hist; + struct bpf_insn_hist_entry *cur_hist_ent; + u32 insn_hist_cap; u32 pass_cnt; /* number of times do_check() was called */ u32 subprog_cnt; /* number of instructions analyzed by the verifier */ @@ -888,6 +882,7 @@ static inline bool bpf_prog_check_recur(const struct bpf_prog *prog) case BPF_PROG_TYPE_TRACING: return prog->expected_attach_type != BPF_TRACE_ITER; case BPF_PROG_TYPE_STRUCT_OPS: + return prog->aux->jits_use_priv_stack; case BPF_PROG_TYPE_LSM: return false; default: diff --git a/include/linux/btf.h b/include/linux/btf.h index b8a583194c4a..4214e76c9168 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -75,6 +75,7 @@ #define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */ #define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */ #define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */ +#define KF_FASTCALL (1 << 12) /* kfunc supports bpf_fastcall protocol */ /* * Tag marking a kernel function as a kfunc. 
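Kfunc flags such as the new KF_FASTCALL are attached when a kfunc id set is built; a minimal registration sketch (bpf_example_kfunc and the set name are hypothetical):

BTF_KFUNCS_START(example_kfunc_ids)
BTF_ID_FLAGS(func, bpf_example_kfunc, KF_FASTCALL)
BTF_KFUNCS_END(example_kfunc_ids)

The resulting set would then be handed to register_btf_kfunc_id_set(), as for any other kfunc set.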
This is meant to minimize the @@ -581,6 +582,16 @@ int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_ty bool btf_types_are_same(const struct btf *btf1, u32 id1, const struct btf *btf2, u32 id2); int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx); + +static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t) +{ + if (!btf_type_is_ptr(t)) + return false; + + t = btf_type_skip_modifiers(btf, t->type, NULL); + + return btf_type_is_struct(t); +} #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) @@ -660,15 +671,4 @@ static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *fun return -EOPNOTSUPP; } #endif - -static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t) -{ - if (!btf_type_is_ptr(t)) - return false; - - t = btf_type_skip_modifiers(btf, t->type, NULL); - - return btf_type_is_struct(t); -} - #endif diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index c0e3e1426a82..139bdececdcf 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -283,5 +283,6 @@ extern u32 btf_tracing_ids[]; extern u32 bpf_cgroup_btf_id[]; extern u32 bpf_local_storage_map_btf_id[]; extern u32 btf_bpf_map_id[]; +extern u32 bpf_kmem_cache_btf_id[]; #endif diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index ee1d0e5f9789..2d7d86f0290d 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -808,7 +808,7 @@ struct ceph_mds_caps { struct ceph_mds_cap_peer { __le64 cap_id; - __le32 seq; + __le32 issue_seq; __le32 mseq; __le32 mds; __u8 flags; @@ -822,7 +822,7 @@ struct ceph_mds_cap_release { struct ceph_mds_cap_item { __le64 ino; __le64 cap_id; - __le32 migrate_seq, seq; + __le32 migrate_seq, issue_seq; } __attribute__ ((packed)); #define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */ diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 15fb566d3f46..733e7f93db66 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -317,12 +317,6 @@ extern void ceph_release_page_vector(struct page **pages, int num_pages); extern void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty); extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); -extern int ceph_copy_user_to_page_vector(struct page **pages, - const void __user *data, - loff_t off, size_t len); -extern void ceph_copy_to_page_vector(struct page **pages, - const void *data, - loff_t off, size_t len); extern void ceph_copy_from_page_vector(struct page **pages, void *data, loff_t off, size_t len); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index d7941478158c..d55b30057a45 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -626,8 +626,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, u32 timeout, struct page ***preply_pages, size_t *preply_len); -int ceph_osdc_watch_check(struct ceph_osd_client *osdc, - struct ceph_osd_linger_request *lreq); int ceph_osdc_list_watchers(struct ceph_osd_client *osdc, struct ceph_object_id *oid, struct ceph_object_locator *oloc, diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h index 5dead8486fd8..879bec0863aa 100644 --- a/include/linux/ceph/pagelist.h +++ b/include/linux/ceph/pagelist.h @@ -17,12 +17,6 @@ struct ceph_pagelist { refcount_t refcnt; }; -struct ceph_pagelist_cursor { - struct ceph_pagelist *pl; /* pagelist, 
for error checking */ - struct list_head *page_lru; /* page in list */ - size_t room; /* room remaining to reset to */ -}; - struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags); extern void ceph_pagelist_release(struct ceph_pagelist *pl); @@ -33,12 +27,6 @@ extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space); extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl); -extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, - struct ceph_pagelist_cursor *c); - -extern int ceph_pagelist_truncate(struct ceph_pagelist *pl, - struct ceph_pagelist_cursor *c); - static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) { __le64 ev = cpu_to_le64(v); diff --git a/include/linux/cfag12864b.h b/include/linux/cfag12864b.h index 6617d9c68d86..83e6613d12ae 100644 --- a/include/linux/cfag12864b.h +++ b/include/linux/cfag12864b.h @@ -28,13 +28,6 @@ extern unsigned char * cfag12864b_buffer; /* - * Get the refresh rate of the LCD - * - * Returns the refresh rate (hertz). - */ -extern unsigned int cfag12864b_getrate(void); - -/* * Enable refreshing * * Returns 0 if successful (anyone was using it), @@ -50,16 +43,6 @@ extern unsigned char cfag12864b_enable(void); extern void cfag12864b_disable(void); /* - * Is enabled refreshing? (is anyone using the module?) - * - * Returns 0 if refreshing is not enabled (anyone is using it), - * or != 0 if refreshing is enabled (someone is using it). - * - * Useful for buffer read-only modules. - */ -extern unsigned char cfag12864b_isenabled(void); - /* * Is the module inited? */ extern unsigned char cfag12864b_isinited(void); diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 47ae4c4d924c..1b20d2d8ef7c 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -327,6 +327,7 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; #endif + u64 ntime; }; /* @@ -397,7 +398,7 @@ struct cgroup_freezer_state { bool freeze; /* Should the cgroup actually be frozen? */ - int e_freeze; + bool e_freeze; /* Fields below are protected by css_set_lock */ diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index 038b2d523bf8..966fcc5ff8ef 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -234,7 +234,7 @@ const volatile void * __must_check_fn(const volatile void *val) * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd) * * CLASS(fdget, f)(fd); - * if (!fd_file(f)) + * if (fd_empty(f)) * return -EBADF; * * // use 'f' without concern @@ -273,6 +273,12 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \ * an anonymous instance of the (guard) class, not recommended for * conditional locks. * + * if_not_guard(name, args...) { <error handling> }: + * convenience macro for conditional guards that calls the statement that + * follows only if the lock was not acquired (typically an error return). + * + * Only for conditional locks. + * * scoped_guard (name, args...) { }: * similar to CLASS(name, scope)(args), except the variable (with the * explicit name 'scope') is declared in a for-loop such that its scope is @@ -285,14 +291,20 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \ * similar to scoped_guard(), except it does fail when the lock * acquire fails. * + * Only for conditional locks.
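Given that definition, typical usage of if_not_guard() pairs it with a try-lock guard class; a sketch assuming the mutex_try guard class from <linux/mutex.h> (function and variable names hypothetical):

static int example_update(struct mutex *lock, int *val)
{
	if_not_guard(mutex_try, lock)
		return -EBUSY;	/* lock was not acquired */

	(*val)++;		/* lock held from here until function exit */
	return 0;
}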
*/ +#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \ +static __maybe_unused const bool class_##_name##_is_conditional = _is_cond + #define DEFINE_GUARD(_name, _type, _lock, _unlock) \ + __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \ DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \ - { return *_T; } + { return (void *)(__force unsigned long)*_T; } #define DEFINE_GUARD_COND(_name, _ext, _condlock) \ + __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \ EXTEND_CLASS(_name, _ext, \ ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \ class_##_name##_t _T) \ @@ -303,16 +315,48 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \ CLASS(_name, __UNIQUE_ID(guard)) #define __guard_ptr(_name) class_##_name##_lock_ptr +#define __is_cond_ptr(_name) class_##_name##_is_conditional -#define scoped_guard(_name, args...) \ - for (CLASS(_name, scope)(args), \ - *done = NULL; __guard_ptr(_name)(&scope) && !done; done = (void *)1) - -#define scoped_cond_guard(_name, _fail, args...) \ - for (CLASS(_name, scope)(args), \ - *done = NULL; !done; done = (void *)1) \ - if (!__guard_ptr(_name)(&scope)) _fail; \ - else +/* + * Helper macro for scoped_guard(). + * + * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that + * compiler would be sure that for the unconditional locks the body of the + * loop (caller-provided code glued to the else clause) could not be skipped. + * It is needed because the other part - "__guard_ptr(_name)(&scope)" - is too + * hard to deduce (even if could be proven true for unconditional locks). + */ +#define __scoped_guard(_name, _label, args...) \ + for (CLASS(_name, scope)(args); \ + __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \ + ({ goto _label; })) \ + if (0) { \ +_label: \ + break; \ + } else + +#define scoped_guard(_name, args...) \ + __scoped_guard(_name, __UNIQUE_ID(label), args) + +#define __scoped_cond_guard(_name, _fail, _label, args...) \ + for (CLASS(_name, scope)(args); true; ({ goto _label; })) \ + if (!__guard_ptr(_name)(&scope)) { \ + BUILD_BUG_ON(!__is_cond_ptr(_name)); \ + _fail; \ +_label: \ + break; \ + } else + +#define scoped_cond_guard(_name, _fail, args...) \ + __scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args) + +#define __if_not_guard(_name, _id, args...) \ + BUILD_BUG_ON(!__is_cond_ptr(_name)); \ + CLASS(_name, _id)(args); \ + if (!__guard_ptr(_name)(&_id)) + +#define if_not_guard(_name, args...) \ + __if_not_guard(_name, __UNIQUE_ID(guard), args) /* * Additional helper macros for generating lock guards with types, either for @@ -347,7 +391,7 @@ static inline void class_##_name##_destructor(class_##_name##_t *_T) \ \ static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \ { \ - return _T->lock; \ + return (void *)(__force unsigned long)_T->lock; \ } @@ -369,14 +413,17 @@ static inline class_##_name##_t class_##_name##_constructor(void) \ } #define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \ +__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \ __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \ __DEFINE_LOCK_GUARD_1(_name, _type, _lock) #define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) 
\ +__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \ __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \ __DEFINE_LOCK_GUARD_0(_name, _lock) #define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \ + __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \ EXTEND_CLASS(_name, _ext, \ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\ if (_T->lock && !(_condlock)) _T->lock = NULL; \ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 7e43caabb54b..2e6e603b7493 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -623,6 +623,24 @@ struct clk *clk_register_gate(struct device *dev, const char *name, NULL, (flags), (reg), (bit_idx), \ (clk_gate_flags), (lock)) /** + * devm_clk_hw_register_gate_parent_hw - register a gate clock with the clock + * framework + * @dev: device that is registering this clock + * @name: name of this clock + * @parent_hw: pointer to parent clk + * @flags: framework-specific flags for this clock + * @reg: register address to control gating of this clock + * @bit_idx: which bit in the register controls gating of this clock + * @clk_gate_flags: gate-specific flags for this clock + * @lock: shared register lock for this clock + */ +#define devm_clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, \ + reg, bit_idx, clk_gate_flags, \ + lock) \ + __devm_clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \ + NULL, (flags), (reg), (bit_idx), \ + (clk_gate_flags), (lock)) +/** * devm_clk_hw_register_gate_parent_data - register a gate clock with the * clock framework * @dev: device that is registering this clock @@ -689,13 +707,15 @@ struct clk_div_table { * CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used * for the divider register. Setting this flag makes the register accesses * big endian. + * CLK_DIVIDER_EVEN_INTEGERS - clock divisor is 2, 4, 6, 8, 10, etc. + * Formula is 2 * (value read from hardware + 1). 
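Worked through, the even-integer formula maps register values 0, 1, 3, 7 to divisors 2, 4, 8, 16:

/* 2 * (regval + 1): 0 -> /2, 1 -> /4, 3 -> /8, 7 -> /16 */
static unsigned int example_even_divisor(unsigned int regval)
{
	return 2 * (regval + 1);
}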
*/ struct clk_divider { struct clk_hw hw; void __iomem *reg; u8 shift; u8 width; - u8 flags; + u16 flags; const struct clk_div_table *table; spinlock_t *lock; }; @@ -711,6 +731,7 @@ struct clk_divider { #define CLK_DIVIDER_READ_ONLY BIT(5) #define CLK_DIVIDER_MAX_AT_ZERO BIT(6) #define CLK_DIVIDER_BIG_ENDIAN BIT(7) +#define CLK_DIVIDER_EVEN_INTEGERS BIT(8) extern const struct clk_ops clk_divider_ops; extern const struct clk_ops clk_divider_ro_ops; @@ -740,19 +761,21 @@ struct clk_hw *__clk_hw_register_divider(struct device *dev, struct device_node *np, const char *name, const char *parent_name, const struct clk_hw *parent_hw, const struct clk_parent_data *parent_data, unsigned long flags, - void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, + void __iomem *reg, u8 shift, u8 width, + unsigned long clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock); struct clk_hw *__devm_clk_hw_register_divider(struct device *dev, struct device_node *np, const char *name, const char *parent_name, const struct clk_hw *parent_hw, const struct clk_parent_data *parent_data, unsigned long flags, - void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, + void __iomem *reg, u8 shift, u8 width, + unsigned long clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock); struct clk *clk_register_divider_table(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, - u8 clk_divider_flags, const struct clk_div_table *table, - spinlock_t *lock); + unsigned long clk_divider_flags, + const struct clk_div_table *table, spinlock_t *lock); /** * clk_register_divider - register a divider clock with the clock framework * @dev: device registering this clock @@ -1137,6 +1160,9 @@ struct clk_hw *clk_hw_register_fixed_factor_with_accuracy_fwname(struct device * struct device_node *np, const char *name, const char *fw_name, unsigned long flags, unsigned int mult, unsigned int div, unsigned long acc); +struct clk_hw *clk_hw_register_fixed_factor_index(struct device *dev, + const char *name, unsigned int index, unsigned long flags, + unsigned int mult, unsigned int div); void clk_hw_unregister_fixed_factor(struct clk_hw *hw); struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev, const char *name, const char *parent_name, unsigned long flags, @@ -1360,7 +1386,6 @@ unsigned long clk_hw_get_flags(const struct clk_hw *hw); (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) bool clk_hw_is_prepared(const struct clk_hw *hw); -bool clk_hw_rate_is_protected(const struct clk_hw *hw); bool clk_hw_is_enabled(const struct clk_hw *hw); bool __clk_is_enabled(struct clk *clk); struct clk *__clk_lookup(const char *name); diff --git a/include/linux/clk.h b/include/linux/clk.h index 851a0f2cf42c..1dcee6d701e4 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -496,11 +496,13 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, struct clk_bulk_data **clks); /** - * devm_clk_bulk_get_all_enable - Get and enable all clocks of the consumer (managed) + * devm_clk_bulk_get_all_enabled - Get and enable all clocks of the consumer (managed) * @dev: device for clock "consumer" * @clks: pointer to the clk_bulk_data table of consumer * - * Returns success (0) or negative errno. + * Returns a positive value for the number of clocks obtained, with the + * clock references stored in the clk_bulk_data table in the @clks field. + * Returns 0 if there are none and a negative value if something failed.
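The tri-state return is easy to mishandle; a sketch of a probe() fragment under the new contract (names hypothetical):

/* Sketch: consume the renamed helper's tri-state return. */
static int example_probe_clocks(struct device *dev)
{
	struct clk_bulk_data *clks;
	int num = devm_clk_bulk_get_all_enabled(dev, &clks);

	if (num < 0)
		return num;	/* lookup or enable failed */
	if (num == 0)
		dev_dbg(dev, "no clocks described for this device\n");
	return 0;
}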
* * This helper function allows drivers to get all clocks of the * consumer and enables them in one operation with management. @@ -508,8 +510,8 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, * is unbound. */ -int __must_check devm_clk_bulk_get_all_enable(struct device *dev, - struct clk_bulk_data **clks); +int __must_check devm_clk_bulk_get_all_enabled(struct device *dev, + struct clk_bulk_data **clks); /** * devm_clk_get - lookup and obtain a managed reference to a clock producer. @@ -1034,7 +1036,7 @@ static inline int __must_check devm_clk_bulk_get_all(struct device *dev, return 0; } -static inline int __must_check devm_clk_bulk_get_all_enable(struct device *dev, +static inline int __must_check devm_clk_bulk_get_all_enabled(struct device *dev, struct clk_bulk_data **clks) { return 0; @@ -1136,6 +1138,15 @@ static inline void clk_restore_context(void) {} #endif +/* Deprecated. Use devm_clk_bulk_get_all_enabled() */ +static inline int __must_check +devm_clk_bulk_get_all_enable(struct device *dev, struct clk_bulk_data **clks) +{ + int ret = devm_clk_bulk_get_all_enabled(dev, clks); + + return ret > 0 ? 0 : ret; +} + /* clk_prepare_enable helps cases using clk_enable in non-atomic context. */ static inline int clk_prepare_enable(struct clk *clk) { diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index d35b677b08fe..ef1b16da6ad5 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -215,7 +215,6 @@ static inline s64 clocksource_cyc2ns(u64 cycles, u32 mult, u32 shift) extern int clocksource_unregister(struct clocksource*); extern void clocksource_touch_watchdog(void); -extern void clocksource_change_rating(struct clocksource *cs, int rating); extern void clocksource_suspend(void); extern void clocksource_resume(void); extern struct clocksource * __init clocksource_default_clock(void); diff --git a/include/linux/clocksource_ids.h b/include/linux/clocksource_ids.h index 2bb4d8c2f1b0..c4ef4ae2eded 100644 --- a/include/linux/clocksource_ids.h +++ b/include/linux/clocksource_ids.h @@ -6,6 +6,7 @@ enum clocksource_ids { CSID_GENERIC = 0, CSID_ARM_ARCH_COUNTER, + CSID_S390_TOD, CSID_X86_TSC_EARLY, CSID_X86_TSC, CSID_X86_KVM_CLK, diff --git a/include/linux/codetag.h b/include/linux/codetag.h index c2a579ccd455..d14dbd26b370 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -13,6 +13,9 @@ struct codetag_module; struct seq_buf; struct module; +#define CODETAG_SECTION_START_PREFIX "__start_" +#define CODETAG_SECTION_STOP_PREFIX "__stop_" + /* * An instance of this structure is created in a special ELF section at every * code location being tagged. 
At runtime, the special section is treated as @@ -35,8 +38,15 @@ struct codetag_type_desc { size_t tag_size; void (*module_load)(struct codetag_type *cttype, struct codetag_module *cmod); - bool (*module_unload)(struct codetag_type *cttype, + void (*module_unload)(struct codetag_type *cttype, struct codetag_module *cmod); +#ifdef CONFIG_MODULES + void (*module_replaced)(struct module *mod, struct module *new_mod); + bool (*needs_section_mem)(struct module *mod, unsigned long size); + void *(*alloc_section_mem)(struct module *mod, unsigned long size, + unsigned int prepend, unsigned long align); + void (*free_section_mem)(struct module *mod, bool used); +#endif }; struct codetag_iterator { @@ -71,11 +81,31 @@ struct codetag_type * codetag_register_type(const struct codetag_type_desc *desc); #if defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) + +bool codetag_needs_module_section(struct module *mod, const char *name, + unsigned long size); +void *codetag_alloc_module_section(struct module *mod, const char *name, + unsigned long size, unsigned int prepend, + unsigned long align); +void codetag_free_module_sections(struct module *mod); +void codetag_module_replaced(struct module *mod, struct module *new_mod); void codetag_load_module(struct module *mod); -bool codetag_unload_module(struct module *mod); -#else +void codetag_unload_module(struct module *mod); + +#else /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ + +static inline bool +codetag_needs_module_section(struct module *mod, const char *name, + unsigned long size) { return false; } +static inline void * +codetag_alloc_module_section(struct module *mod, const char *name, + unsigned long size, unsigned int prepend, + unsigned long align) { return NULL; } +static inline void codetag_free_module_sections(struct module *mod) {} +static inline void codetag_module_replaced(struct module *mod, struct module *new_mod) {} static inline void codetag_load_module(struct module *mod) {} -static inline bool codetag_unload_module(struct module *mod) { return true; } -#endif +static inline void codetag_unload_module(struct module *mod) {} + +#endif /* defined(CONFIG_CODE_TAGGING) && defined(CONFIG_MODULES) */ #endif /* _LINUX_CODETAG_H */ diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 4c1a39dcb624..2e7c2c282f3a 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_TYPES_H -#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." +#error "Please do not include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." #endif /* Compiler specific definitions for Clang compiler */ diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index cd6f9aae311f..d0ed9583743f 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_TYPES_H -#error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." +#error "Please do not include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead." 
#endif /* diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 4d4e23b6e3e7..469a64dd6495 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -239,11 +239,18 @@ static inline void *offset_to_ptr(const int *off) #endif /* __ASSEMBLY__ */ +#ifdef __CHECKER__ +#define __BUILD_BUG_ON_ZERO_MSG(e, msg) (0) +#else /* __CHECKER__ */ +#define __BUILD_BUG_ON_ZERO_MSG(e, msg) ((int)sizeof(struct {_Static_assert(!(e), msg);})) +#endif /* __CHECKER__ */ + /* &a[0] degrades to a pointer: a different type from an array */ -#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +#define __must_be_array(a) __BUILD_BUG_ON_ZERO_MSG(__same_type((a), &(a)[0]), "must be array") /* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */ -#define __must_be_cstr(p) BUILD_BUG_ON_ZERO(__annotated(p, nonstring)) +#define __must_be_cstr(p) \ + __BUILD_BUG_ON_ZERO_MSG(__annotated(p, nonstring), "must be cstr (NUL-terminated)") /* * This returns a constant expression while determining if an argument is diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 32284cd26d52..c16d4199bf92 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -95,19 +95,6 @@ #endif /* - * Optional: only supported since gcc >= 15 - * Optional: only supported since clang >= 18 - * - * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896 - * clang: https://github.com/llvm/llvm-project/pull/76348 - */ -#if __has_attribute(__counted_by__) -# define __counted_by(member) __attribute__((__counted_by__(member))) -#else -# define __counted_by(member) -#endif - -/* * Optional: not supported by gcc * Optional: only supported since clang >= 14.0 * diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 1a957ea2f4fe..981cc3d7e3aa 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -266,6 +266,12 @@ struct ftrace_likely_data { #define noinline_for_stack noinline /* + * Use noinline_for_tracing for functions that should not be inlined. + * For tracing reasons. + */ +#define noinline_for_tracing noinline + +/* * Sanitizer helper attributes: Because using __always_inline and * __no_sanitize_* conflict, provide helper attributes that will either expand * to __no_sanitize_* in compilation units where instrumentation is enabled @@ -324,6 +330,25 @@ struct ftrace_likely_data { #endif /* + * Optional: only supported since gcc >= 15 + * Optional: only supported since clang >= 18 + * + * gcc: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=108896 + * clang: https://github.com/llvm/llvm-project/pull/76348 + * + * __bdos on clang < 19.1.2 can erroneously return 0: + * https://github.com/llvm/llvm-project/pull/110497 + * + * __bdos on clang < 19.1.3 can be off by 4: + * https://github.com/llvm/llvm-project/pull/112636 + */ +#ifdef CONFIG_CC_HAS_COUNTED_BY +# define __counted_by(member) __attribute__((__counted_by__(member))) +#else +# define __counted_by(member) +#endif + +/* * Apply __counted_by() when the Endianness matches to increase test coverage. 
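What the new CONFIG_CC_HAS_COUNTED_BY gate ultimately enables is the flexible-array annotation; a minimal sketch of its use:

/* 'nr' carries the element count of 'data' for
 * __builtin_dynamic_object_size() and array-bounds sanitizers. */
struct example_buf {
	int nr;
	u32 data[] __counted_by(nr);
};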
*/ #ifdef __LITTLE_ENDIAN diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 2361ed4d2b15..a04b73c40173 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -147,6 +147,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_EIOINTC_STARTING, CPUHP_AP_IRQ_AVECINTC_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, + CPUHP_AP_IRQ_THEAD_ACLINT_SSWI_STARTING, CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, @@ -208,7 +209,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, - CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, @@ -227,6 +227,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE, CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE, CPUHP_AP_PERF_ARM_MARVELL_CN10K_DDR_ONLINE, + CPUHP_AP_PERF_ARM_MRVL_PEM_ONLINE, CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE, CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE, diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 3183aeb7f5b4..a9ee4fe55dcf 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -61,7 +61,7 @@ struct cpuidle_state { struct cpuidle_driver *drv, int index); - int (*enter_dead) (struct cpuidle_device *dev, int index); + void (*enter_dead) (struct cpuidle_device *dev, int index); /* * CPUs execute ->enter_s2idle with the local tick or entire timekeeping diff --git a/include/linux/cred.h b/include/linux/cred.h index 2976f534a7a3..e4a3155fe409 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h @@ -172,6 +172,24 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred) cred->cap_inheritable)); } +/* + * Override creds without bumping reference count. Caller must ensure + * reference remains valid or has taken reference. Almost always not the + * interface you want. Use override_creds()/revert_creds() instead. + */ +static inline const struct cred *override_creds_light(const struct cred *override_cred) +{ + const struct cred *old = current->cred; + + rcu_assign_pointer(current->cred, override_cred); + return old; +} + +static inline void revert_creds_light(const struct cred *revert_cred) +{ + rcu_assign_pointer(current->cred, revert_cred); +} + /** * get_new_cred_many - Get references on a new set of credentials * @cred: The new credentials to reference diff --git a/include/linux/dax.h b/include/linux/dax.h index 9d3e3327af4c..df41a0017b31 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -27,12 +27,6 @@ struct dax_operations { */ long (*direct_access)(struct dax_device *, pgoff_t, long, enum dax_access_mode, void **, pfn_t *); - /* - * Validate whether this device is usable as an fsdax backing - * device. - */ - bool (*dax_supported)(struct dax_device *, struct block_device *, int, - sector_t, sector_t); /* zero_page_range: required operation. 
Zero page range */ int (*zero_page_range)(struct dax_device *, pgoff_t, size_t); /* diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h index 0928a6c8ae1e..59444b495d49 100644 --- a/include/linux/debugfs.h +++ b/include/linux/debugfs.h @@ -71,9 +71,63 @@ typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *); struct dentry *debugfs_lookup(const char *name, struct dentry *parent); -struct dentry *debugfs_create_file(const char *name, umode_t mode, - struct dentry *parent, void *data, - const struct file_operations *fops); +struct debugfs_short_fops { + ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); + ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); + loff_t (*llseek) (struct file *, loff_t, int); +}; + +struct dentry *debugfs_create_file_full(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); +struct dentry *debugfs_create_file_short(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct debugfs_short_fops *fops); + +/** + * debugfs_create_file - create a file in the debugfs filesystem + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have. + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is NULL, then the + * file will be created in the root of the debugfs filesystem. + * @data: a pointer to something that the caller will want to get to later + * on. The inode.i_private pointer will point to this value on + * the open() call. + * @fops: a pointer to a struct file_operations or struct debugfs_short_fops that + * should be used for this file. + * + * This is the basic "create a file" function for debugfs. It allows for a + * wide range of flexibility in creating a file, or a directory (if you want + * to create a directory, the debugfs_create_dir() function is + * recommended to be used instead.) + * + * This function will return a pointer to a dentry if it succeeds. This + * pointer must be passed to the debugfs_remove() function when the file is + * to be removed (no automatic cleanup happens if your module is unloaded, + * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be + * returned. + * + * If debugfs is not enabled in the kernel, the value -%ENODEV will be + * returned. + * + * If fops points to a struct debugfs_short_fops, then simple_open() will be + * used for the open, and only read/write/llseek are supported and are proxied, + * so no module reference or release are needed. + * + * NOTE: it's expected that most callers should _ignore_ the errors returned + * by this function. Other debugfs functions handle the fact that the "dentry" + * passed to them could be an error and they don't crash in that case. + * Drivers should generally work fine even if debugfs fails to init anyway. 
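Concretely, the short form lets a driver drop the full file_operations boilerplate; a sketch of a read-only file using it (handler and names hypothetical):

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	char tmp[16];
	int len = scnprintf(tmp, sizeof(tmp), "%d\n", 42);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}

static const struct debugfs_short_fops example_fops = {
	.read = example_read,
};

/* debugfs_create_file("example", 0444, parent, NULL, &example_fops)
 * would dispatch to debugfs_create_file_short() via the _Generic() below. */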
+ */ +#define debugfs_create_file(name, mode, parent, data, fops) \ + _Generic(fops, \ + const struct file_operations *: debugfs_create_file_full, \ + const struct debugfs_short_fops *: debugfs_create_file_short, \ + struct file_operations *: debugfs_create_file_full, \ + struct debugfs_short_fops *: debugfs_create_file_short) \ + (name, mode, parent, data, fops) + struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops); @@ -207,7 +261,7 @@ static inline struct dentry *debugfs_lookup(const char *name, static inline struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, - const struct file_operations *fops) + const void *fops) { return ERR_PTR(-ENODEV); } diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h index 32444686b6ff..8b95545e7924 100644 --- a/include/linux/debugobjects.h +++ b/include/linux/debugobjects.h @@ -23,13 +23,17 @@ struct debug_obj_descr; * @state: tracked object state * @astate: current active state * @object: pointer to the real object + * @batch_last: pointer to the last hlist node in a batch * @descr: pointer to an object type specific debug description structure */ struct debug_obj { - struct hlist_node node; - enum debug_obj_state state; - unsigned int astate; - void *object; + struct hlist_node node; + enum debug_obj_state state; + unsigned int astate; + union { + void *object; + struct hlist_node *batch_last; + }; const struct debug_obj_descr *descr; }; diff --git a/include/linux/delay.h b/include/linux/delay.h index ff9cda975e30..89866bab100d 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -6,21 +6,12 @@ * Copyright (C) 1993 Linus Torvalds * * Delay routines, using a pre-computed "loops_per_jiffy" value. - * - * Please note that ndelay(), udelay() and mdelay() may return early for - * several reasons: - * 1. computed loops_per_jiffy too low (due to the time taken to - * execute the timer interrupt.) - * 2. cache behaviour affecting the time it takes to execute the - * loop function. - * 3. CPU clock rate changes. - * - * Please see this thread: - * https://lists.openwall.net/linux-kernel/2011/01/09/56 + * Sleep routines using timer list timers or hrtimers. */ #include <linux/math.h> #include <linux/sched.h> +#include <linux/jiffies.h> extern unsigned long loops_per_jiffy; @@ -35,12 +26,21 @@ extern unsigned long loops_per_jiffy; * The 2nd mdelay() definition ensures GCC will optimize away the * while loop for the common cases where n <= MAX_UDELAY_MS -- Paul G. */ - #ifndef MAX_UDELAY_MS #define MAX_UDELAY_MS 5 #endif #ifndef mdelay +/** + * mdelay - Inserting a delay based on milliseconds with busy waiting + * @n: requested delay in milliseconds + * + * See udelay() for basic information about mdelay() and its variants. + * + * Please double-check whether mdelay() is the right way to go, or whether + * refactoring the code to use msleep() instead is the better option. + */ #define mdelay(n) (\ (__builtin_constant_p(n) && (n)<=MAX_UDELAY_MS) ?
udelay((n)*1000) : \ ({unsigned long __ms=(n); while (__ms--) udelay(1000);})) @@ -63,30 +63,75 @@ unsigned long msleep_interruptible(unsigned int msecs); void usleep_range_state(unsigned long min, unsigned long max, unsigned int state); +/** + * usleep_range - Sleep for an approximate time + * @min: Minimum time in microseconds to sleep + * @max: Maximum time in microseconds to sleep + * + * For basic information please refer to usleep_range_state(). + * + * The task will be in the state TASK_UNINTERRUPTIBLE during the sleep. + */ static inline void usleep_range(unsigned long min, unsigned long max) { usleep_range_state(min, max, TASK_UNINTERRUPTIBLE); } -static inline void usleep_idle_range(unsigned long min, unsigned long max) +/** + * usleep_range_idle - Sleep for an approximate time with idle time accounting + * @min: Minimum time in microseconds to sleep + * @max: Maximum time in microseconds to sleep + * + * For basic information please refer to usleep_range_state(). + * + * The sleeping task has the state TASK_IDLE during the sleep to prevent + * contribution to the load average. + */ +static inline void usleep_range_idle(unsigned long min, unsigned long max) { usleep_range_state(min, max, TASK_IDLE); } +/** + * ssleep - wrapper for seconds around msleep + * @seconds: Requested sleep duration in seconds + * + * Please refer to msleep() for detailed information. + */ static inline void ssleep(unsigned int seconds) { msleep(seconds * 1000); } -/* see Documentation/timers/timers-howto.rst for the thresholds */ +static const unsigned int max_slack_shift = 2; +#define USLEEP_RANGE_UPPER_BOUND ((TICK_NSEC << max_slack_shift) / NSEC_PER_USEC) + +/** + * fsleep - flexible sleep which autoselects the best mechanism + * @usecs: requested sleep duration in microseconds + * + * fsleep() selects the best mechanism that will provide a maximum of 25% slack + * to the requested sleep duration. Therefore it uses: + * + * * udelay() loop for sleep durations <= 10 microseconds to avoid hrtimer + * overhead for really short sleep durations. + * * usleep_range() for sleep durations where msleep() would lead to a slack + * larger than 25%. This depends on the granularity of + * jiffies. + * * msleep() for all other sleep durations. + * + * Note: When %CONFIG_HIGH_RES_TIMERS is not set, all sleeps are processed with + * the granularity of jiffies and the slack might exceed 25% especially for + * short sleep durations. + */ static inline void fsleep(unsigned long usecs) { if (usecs <= 10) udelay(usecs); - else if (usecs <= 20000) - usleep_range(usecs, 2 * usecs); + else if (usecs < USLEEP_RANGE_UPPER_BOUND) + usleep_range(usecs, usecs + (usecs >> max_slack_shift)); else - msleep(DIV_ROUND_UP(usecs, 1000)); + msleep(DIV_ROUND_UP(usecs, USEC_PER_MSEC)); } #endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h index ca32b5bb28eb..eb2094e43050 100644 --- a/include/linux/dev_printk.h +++ b/include/linux/dev_printk.h @@ -276,6 +276,7 @@ do { \ dev_driver_string(dev), dev_name(dev), ## arg) __printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...); +__printf(3, 4) int dev_warn_probe(const struct device *dev, int err, const char *fmt, ...); /* Simple helper for dev_err_probe() when ERR_PTR() is to be returned. */ #define dev_err_ptr_probe(dev, ___err, fmt, ...)
\ diff --git a/include/linux/dim.h b/include/linux/dim.h index 1b581ff25a15..06543fd40fcc 100644 --- a/include/linux/dim.h +++ b/include/linux/dim.h @@ -351,7 +351,8 @@ void dim_park_tired(struct dim *dim); * Takes into consideration counter wrap-around. * Returned boolean indicates whether curr_stats are reliable. */ -bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end, +bool dim_calc_stats(const struct dim_sample *start, + const struct dim_sample *end, struct dim_stats *curr_stats); /** @@ -424,7 +425,7 @@ struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode); * This is the main logic of the algorithm, where data is processed in order * to decide on next required action. */ -void net_dim(struct dim *dim, struct dim_sample end_sample); +void net_dim(struct dim *dim, const struct dim_sample *end_sample); /* RDMA DIM */ diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index e06bad467f55..e7ad819962e3 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -574,6 +574,12 @@ int dma_fence_get_status(struct dma_fence *fence); * rather than success. This must be set before signaling (so that the value * is visible before any waiters on the signal callback are woken). This * helper exists to help catching erroneous setting of #dma_fence.error. + * + * Examples of error codes which drivers should use: + * + * * %-ENODATA This operation produced no data, no other operation affected. + * * %-ECANCELED All operations from the same context have been canceled. + * * %-ETIME Operation caused a timeout and potentially device reset. */ static inline void dma_fence_set_error(struct dma_fence *fence, int error) diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index b7773201414c..e172522cd936 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -242,7 +242,7 @@ static inline bool dev_is_dma_coherent(struct device *dev) { return true; } -#endif /* CONFIG_ARCH_HAS_DMA_COHERENCE_H */ +#endif static inline void dma_reset_need_sync(struct device *dev) { diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 1524da363734..b79925b1c433 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -2,15 +2,11 @@ #ifndef _LINUX_DMA_MAPPING_H #define _LINUX_DMA_MAPPING_H -#include <linux/cache.h> -#include <linux/sizes.h> -#include <linux/string.h> #include <linux/device.h> #include <linux/err.h> #include <linux/dma-direction.h> #include <linux/scatterlist.h> #include <linux/bug.h> -#include <linux/mem_encrypt.h> /** * List of possible attributes associated with a DMA mapping. The semantics diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index 8d0e34dad446..c5ab6fd9ebe8 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -105,10 +105,10 @@ enum dma_resv_usage { * This should be used by submissions which don't want to participate in * any implicit synchronization. * - * The most common case are preemption fences, page table updates, TLB - * flushes as well as explicit synced user submissions. + * The most common cases are preemption fences, page table updates, TLB + * flushes as well as explicitly synced user submissions. * - * Explicit synced user user submissions can be promoted to + * Explicitly synced user submissions can be promoted to * DMA_RESV_USAGE_READ or DMA_RESV_USAGE_WRITE as needed using * dma_buf_import_sync_file() when implicit synchronization should * become necessary after initial adding of the fence. 
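The dma_fence_set_error() kernel-doc above now lists the error codes drivers are expected to use. As a minimal sketch (not part of this patch; the handler name and the fence it operates on are hypothetical), a driver timeout path would set the error before signalling so that woken waiters observe it:

#include <linux/dma-fence.h>

/* Hypothetical timeout handler: mark the fence failed, then signal it. */
static void my_job_timedout(struct dma_fence *fence)
{
	/*
	 * dma_fence_set_error() must come before dma_fence_signal() so
	 * the error value is visible to any waiter woken by the signal.
	 */
	dma_fence_set_error(fence, -ETIME);
	dma_fence_signal(fence);
}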
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 499bb2c63483..692b2b445761 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -292,7 +292,6 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src) struct irq_data; extern void dmar_msi_unmask(struct irq_data *data); extern void dmar_msi_mask(struct irq_data *data); -extern void dmar_msi_read(int irq, struct msi_msg *msg); extern void dmar_msi_write(int irq, struct msi_msg *msg); extern int dmar_set_interrupt(struct intel_iommu *iommu); extern irqreturn_t dmar_fault(int irq, void *dev_id); diff --git a/include/linux/dpll.h b/include/linux/dpll.h index 81f7b623d0ba..5e4f9ab1cf75 100644 --- a/include/linux/dpll.h +++ b/include/linux/dpll.h @@ -26,6 +26,10 @@ struct dpll_device_ops { struct netlink_ext_ack *extack); int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv, s32 *temp, struct netlink_ext_ack *extack); + int (*clock_quality_level_get)(const struct dpll_device *dpll, + void *dpll_priv, + unsigned long *qls, + struct netlink_ext_ack *extack); }; struct dpll_pin_ops { diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h index 82ebf9223948..f8811c46b89e 100644 --- a/include/linux/dw_apb_timer.h +++ b/include/linux/dw_apb_timer.h @@ -34,9 +34,6 @@ struct dw_apb_clocksource { }; void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced); -void dw_apb_clockevent_pause(struct dw_apb_clock_event_device *dw_ced); -void dw_apb_clockevent_resume(struct dw_apb_clock_event_device *dw_ced); -void dw_apb_clockevent_stop(struct dw_apb_clock_event_device *dw_ced); struct dw_apb_clock_event_device * dw_apb_clockevent_init(int cpu, const char *name, unsigned rating, diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h index 281298e77a15..808b1a5102e7 100644 --- a/include/linux/dynamic_queue_limits.h +++ b/include/linux/dynamic_queue_limits.h @@ -127,7 +127,7 @@ static inline void dql_queued(struct dql *dql, unsigned int count) if (WARN_ON_ONCE(count > DQL_MAX_OBJECT)) return; - dql->last_obj_cnt = count; + WRITE_ONCE(dql->last_obj_cnt, count); /* We want to force a write first, so that cpu do not attempt * to get cache line containing last_obj_cnt, num_queued, adj_limit diff --git a/include/linux/eeprom_93cx6.h b/include/linux/eeprom_93cx6.h index c860c72a921d..3a485cc0e0fa 100644 --- a/include/linux/eeprom_93cx6.h +++ b/include/linux/eeprom_93cx6.h @@ -11,6 +11,8 @@ Supported chipsets: 93c46, 93c56 and 93c66. */ +#include <linux/bits.h> + /* * EEPROM operation defines. */ @@ -34,6 +36,7 @@ * @register_write(struct eeprom_93cx6 *eeprom): handler to * write to the eeprom register by using all reg_* fields. * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines + * @quirks: eeprom or controller quirks * @drive_data: Set if we're driving the data line. 
* @reg_data_in: register field to indicate data input * @reg_data_out: register field to indicate data output @@ -50,6 +53,9 @@ struct eeprom_93cx6 { void (*register_write)(struct eeprom_93cx6 *eeprom); int width; + unsigned int quirks; +/* Some EEPROMs require an extra clock cycle before reading */ +#define PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE BIT(0) char drive_data; char reg_data_in; @@ -71,3 +77,8 @@ extern void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable); extern void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data); + +static inline bool has_quirk_extra_read_cycle(struct eeprom_93cx6 *eeprom) +{ + return eeprom->quirks & PCI_EEPROM_QUIRK_EXTRA_READ_CYCLE; +} diff --git a/include/linux/efi.h b/include/linux/efi.h index e28d88066033..e5815867aba9 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -379,7 +379,6 @@ void efi_native_runtime_setup(void); #define EFI_SYSTEM_RESOURCE_TABLE_GUID EFI_GUID(0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80) #define EFI_FILE_SYSTEM_GUID EFI_GUID(0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b) #define DEVICE_TREE_GUID EFI_GUID(0xb1b621d5, 0xf19c, 0x41a5, 0x83, 0x0b, 0xd9, 0x15, 0x2c, 0x69, 0xaa, 0xe0) -#define EFI_PROPERTIES_TABLE_GUID EFI_GUID(0x880aaca3, 0x4adc, 0x4a04, 0x90, 0x79, 0xb7, 0x47, 0x34, 0x08, 0x25, 0xe5) #define EFI_RNG_PROTOCOL_GUID EFI_GUID(0x3152bca5, 0xeade, 0x433d, 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44) #define EFI_RNG_ALGORITHM_RAW EFI_GUID(0xe43176d7, 0xb6e8, 0x4827, 0xb7, 0x84, 0x7f, 0xfd, 0xc4, 0xb6, 0x85, 0x61) #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) @@ -581,15 +580,6 @@ struct efi_mem_range { }; typedef struct { - u32 version; - u32 length; - u64 memory_protection_attribute; -} efi_properties_table_t; - -#define EFI_PROPERTIES_TABLE_VERSION 0x00010000 -#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1 - -typedef struct { u16 version; u16 length; u32 runtime_services_supported; @@ -871,10 +861,9 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len) #define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ #define EFI_ARCH_1 7 /* First arch-specific bit */ #define EFI_DBG 8 /* Print additional debug info at runtime */ -#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */ -#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */ -#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */ -#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */ +#define EFI_MEM_ATTR 9 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */ +#define EFI_MEM_NO_SOFT_RESERVE 10 /* Is the kernel configured to ignore soft reservations? */ +#define EFI_PRESERVE_BS_REGIONS 11 /* Are EFI boot-services memory segments available? 
*/ #ifdef CONFIG_EFI /* diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 1ff52020cf75..752e0b297582 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -55,6 +55,8 @@ struct em_perf_table { * struct em_perf_domain - Performance domain * @em_table: Pointer to the runtime modifiable em_perf_table * @nr_perf_states: Number of performance states + * @min_perf_state: Minimum allowed Performance State index + * @max_perf_state: Maximum allowed Performance State index * @flags: See "em_perf_domain flags" * @cpus: Cpumask covering the CPUs of the domain. It's here * for performance reasons to avoid potential cache @@ -70,6 +72,8 @@ struct em_perf_table { struct em_perf_domain { struct em_perf_table __rcu *em_table; int nr_perf_states; + int min_perf_state; + int max_perf_state; unsigned long flags; unsigned long cpus[]; }; @@ -173,13 +177,14 @@ void em_table_free(struct em_perf_table __rcu *table); int em_dev_compute_costs(struct device *dev, struct em_perf_state *table, int nr_states); int em_dev_update_chip_binning(struct device *dev); +int em_update_performance_limits(struct em_perf_domain *pd, + unsigned long freq_min_khz, unsigned long freq_max_khz); /** * em_pd_get_efficient_state() - Get an efficient performance state from the EM * @table: List of performance states, in ascending order - * @nr_perf_states: Number of performance states + * @pd: performance domain for which this must be done * @max_util: Max utilization to map with the EM - * @pd_flags: Performance Domain flags * * It is called from the scheduler code quite frequently and as a consequence * doesn't implement any check. @@ -188,13 +193,16 @@ int em_dev_update_chip_binning(struct device *dev); * requirement. */ static inline int -em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states, - unsigned long max_util, unsigned long pd_flags) +em_pd_get_efficient_state(struct em_perf_state *table, + struct em_perf_domain *pd, unsigned long max_util) { + unsigned long pd_flags = pd->flags; + int min_ps = pd->min_perf_state; + int max_ps = pd->max_perf_state; struct em_perf_state *ps; int i; - for (i = 0; i < nr_perf_states; i++) { + for (i = min_ps; i <= max_ps; i++) { ps = &table[i]; if (ps->performance >= max_util) { if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES && @@ -204,7 +212,7 @@ em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states, } } - return nr_perf_states - 1; + return max_ps; } /** @@ -253,8 +261,7 @@ static inline unsigned long em_cpu_energy(struct em_perf_domain *pd, * requested performance. 
*/ em_table = rcu_dereference(pd->em_table); - i = em_pd_get_efficient_state(em_table->state, pd->nr_perf_states, - max_util, pd->flags); + i = em_pd_get_efficient_state(em_table->state, pd, max_util); ps = &em_table->state[i]; /* @@ -391,6 +398,12 @@ static inline int em_dev_update_chip_binning(struct device *dev) { return -EINVAL; } +static inline +int em_update_performance_limits(struct em_perf_domain *pd, + unsigned long freq_min_khz, unsigned long freq_max_khz) +{ + return -EINVAL; +} #endif #endif diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index 1e50cdb83ae5..fc61d0205c97 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -64,7 +64,8 @@ #define EXIT_TO_USER_MODE_WORK \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ + _TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | \ + _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL | \ ARCH_EXIT_TO_USER_MODE_WORK) /** diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h index 6813171afccb..16149f6625e4 100644 --- a/include/linux/entry-kvm.h +++ b/include/linux/entry-kvm.h @@ -17,8 +17,9 @@ #endif #define XFER_TO_GUEST_MODE_WORK \ - (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL | \ - _TIF_NOTIFY_RESUME | ARCH_XFER_TO_GUEST_MODE_WORK) + (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY | _TIF_SIGPENDING | \ + _TIF_NOTIFY_SIGNAL | _TIF_NOTIFY_RESUME | \ + ARCH_XFER_TO_GUEST_MODE_WORK) struct kvm_vcpu; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 12f6dc567598..b8b935b52603 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -734,6 +734,9 @@ struct kernel_ethtool_ts_info { * @rxfh_per_ctx_key: device supports setting different RSS key for each * additional context. Netlink API should report hfunc, key, and input_xfrm * for every context, not just context 0. + * @cap_rss_rxnfc_adds: device supports nonzero ring_cookie in filters with + * %FLOW_RSS flag; the queue ID from the filter is added to the value from + * the indirection table to determine the delivery queue. * @rxfh_indir_space: max size of RSS indirection tables, if indirection table * size as returned by @get_rxfh_indir_size may change during lifetime * of the device. Leave as 0 if the table size is constant. @@ -956,6 +959,7 @@ struct ethtool_ops { u32 cap_rss_ctx_supported:1; u32 cap_rss_sym_xor_supported:1; u32 rxfh_per_ctx_key:1; + u32 cap_rss_rxnfc_adds:1; u32 rxfh_indir_space; u16 rxfh_key_space; u16 rxfh_priv_size; diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 3337745d81bd..0c0d00fcd131 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -42,7 +42,7 @@ static inline void eventpoll_release(struct file *file) * because the file in on the way to be removed and nobody ( but * eventpoll ) has still a reference to this file. 
*/ - if (likely(!file->f_ep)) + if (likely(!READ_ONCE(file->f_ep))) return; /* diff --git a/include/linux/execmem.h b/include/linux/execmem.h index 32cef1144117..64130ae19690 100644 --- a/include/linux/execmem.h +++ b/include/linux/execmem.h @@ -46,11 +46,27 @@ enum execmem_type { /** * enum execmem_range_flags - options for executable memory allocations * @EXECMEM_KASAN_SHADOW: allocate kasan shadow + * @EXECMEM_ROX_CACHE: allocations should use ROX cache of huge pages */ enum execmem_range_flags { EXECMEM_KASAN_SHADOW = (1 << 0), + EXECMEM_ROX_CACHE = (1 << 1), }; +#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX +/** + * execmem_fill_trapping_insns - set memory to contain instructions that + * will trap + * @ptr: pointer to memory to fill + * @size: size of the range to fill + * @writable: whether the memory pointed to by @ptr is writable or ROX + * + * A hook for architectures to fill execmem ranges with invalid instructions. + * Architectures that use EXECMEM_ROX_CACHE must implement this. + */ +void execmem_fill_trapping_insns(void *ptr, size_t size, bool writable); +#endif + /** * struct execmem_range - definition of an address space suitable for code and * related data allocations @@ -123,6 +139,39 @@ void *execmem_alloc(enum execmem_type type, size_t size); */ void execmem_free(void *ptr); +#ifdef CONFIG_MMU +/** + * execmem_vmap - create virtual mapping for EXECMEM_MODULE_DATA memory + * @size: size of the virtual mapping in bytes + * + * Maps virtually contiguous area in the range suitable for EXECMEM_MODULE_DATA. + * + * Return: the area descriptor on success or %NULL on failure. + */ +struct vm_struct *execmem_vmap(size_t size); +#endif + +/** + * execmem_update_copy - copy an update to executable memory + * @dst: destination address to update + * @src: source address containing the data + * @size: how many bytes of memory should be copied + * + * Copy @size bytes from @src to @dst using text poking if the memory at + * @dst is read-only. + * + * Return: a pointer to @dst or NULL on error + */ +void *execmem_update_copy(void *dst, const void *src, size_t size); + +/** + * execmem_is_rox - check if execmem is read-only + * @type: the execmem type to check + * + * Return: %true if the @type is read-only, %false if it's writable + */ +bool execmem_is_rox(enum execmem_type type); + #if defined(CONFIG_EXECMEM) && !defined(CONFIG_ARCH_WANTS_EXECMEM_LATE) void execmem_init(void); #else diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index 893a1d21dc1c..4cc8801e50e3 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -160,6 +160,19 @@ struct fid { #define EXPORT_FH_FID 0x2 /* File handle may be non-decodeable */ #define EXPORT_FH_DIR_ONLY 0x4 /* Only decode file handle for a directory */ +/* + * Filesystems use only lower 8 bits of file_handle type for fid_type. + * name_to_handle_at() uses upper 16 bits of type as user flags to be + * interpreted by open_by_handle_at().
+ */ +#define FILEID_USER_FLAGS_MASK 0xffff0000 +#define FILEID_USER_FLAGS(type) ((type) & FILEID_USER_FLAGS_MASK) + +/* Flags supported in encoded handle_type that is exported to user */ +#define FILEID_IS_CONNECTABLE 0x10000 +#define FILEID_IS_DIR 0x20000 +#define FILEID_VALID_USER_FLAGS (FILEID_IS_CONNECTABLE | FILEID_IS_DIR) + /** * struct export_operations - for nfsd to communicate with file systems * @encode_fh: encode a file handle fragment from a dentry @@ -250,19 +263,6 @@ struct export_operations { unsigned long flags; }; -/** - * exportfs_lock_op_is_async() - export op supports async lock operation - * @export_ops: the nfs export operations to check - * - * Returns true if the nfs export_operations structure has - * EXPORT_OP_ASYNC_LOCK in their flags set - */ -static inline bool -exportfs_lock_op_is_async(const struct export_operations *export_ops) -{ - return export_ops->flags & EXPORT_OP_ASYNC_LOCK; -} - extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid, int *max_len, struct inode *parent, int flags); diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index b0b821edfd97..c24f8bc01045 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -24,10 +24,11 @@ #define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ #define COMPRESS_ADDR ((block_t)-2) /* used as compressed data flag */ -#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS) -#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS) +#define F2FS_BLKSIZE_MASK (F2FS_BLKSIZE - 1) +#define F2FS_BYTES_TO_BLK(bytes) ((unsigned long long)(bytes) >> F2FS_BLKSIZE_BITS) +#define F2FS_BLK_TO_BYTES(blk) ((unsigned long long)(blk) << F2FS_BLKSIZE_BITS) #define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1) -#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1)) +#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1)) /* 0, 1(node nid), 2(meta nid) are reserved node id */ #define F2FS_RESERVED_NODE_NUM 3 diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index 4f1c4f603118..89ff45bd6f01 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -36,6 +36,7 @@ #define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \ FAN_REPORT_TID | \ FAN_REPORT_PIDFD | \ + FAN_REPORT_FD_ERROR | \ FAN_UNLIMITED_QUEUE | \ FAN_UNLIMITED_MARKS) diff --git a/include/linux/fb.h b/include/linux/fb.h index 267b59ead432..5ba187e08cf7 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -21,6 +21,7 @@ struct fb_info; struct file; struct i2c_adapter; struct inode; +struct lcd_device; struct module; struct notifier_block; struct page; @@ -480,6 +481,13 @@ struct fb_info { struct mutex bl_curve_mutex; u8 bl_curve[FB_BACKLIGHT_LEVELS]; #endif + + /* + * Assigned LCD device; set before framebuffer + * registration, remove after unregister + */ + struct lcd_device *lcd_dev; + #ifdef CONFIG_FB_DEFERRED_IO struct delayed_work deferred_work; unsigned long npagerefs; @@ -754,6 +762,11 @@ static inline struct backlight_device *fb_bl_device(struct fb_info *info) } #endif +static inline struct lcd_device *fb_lcd_device(struct fb_info *info) +{ + return info->lcd_dev; +} + /* fbmon.c */ #define FB_MAXTIMINGS 0 #define FB_VSYNCTIMINGS 1 diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index b1c5722f2b3c..c45306a9f007 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h @@ -92,10 +92,6 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un return files_lookup_fd_raw(files, 
fd); } -struct file *lookup_fdget_rcu(unsigned int fd); -struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd); -struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd); - static inline bool close_on_exec(unsigned int fd, const struct files_struct *files) { return test_bit(fd, files_fdtable(files)->close_on_exec); @@ -115,7 +111,6 @@ int iterate_fd(struct files_struct *, unsigned, const void *); extern int close_fd(unsigned int fd); -extern int __close_range(unsigned int fd, unsigned int max_fd, unsigned int flags); extern struct file *file_close_fd(unsigned int fd); extern struct kmem_cache *files_cachep; diff --git a/include/linux/file.h b/include/linux/file.h index f98de143245a..302f11355b10 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -30,12 +30,6 @@ extern struct file *alloc_file_pseudo_noaccount(struct inode *, struct vfsmount extern struct file *alloc_file_clone(struct file *, int flags, const struct file_operations *); -static inline void fput_light(struct file *file, int fput_needed) -{ - if (fput_needed) - fput(file); -} - /* either a reference to struct file + flags * (cloned vs. borrowed, pos locked), with * flags stored in lower bits of value, @@ -72,6 +66,7 @@ static inline void fdput(struct fd fd) extern struct file *fget(unsigned int fd); extern struct file *fget_raw(unsigned int fd); extern struct file *fget_task(struct task_struct *task, unsigned int fd); +extern struct file *fget_task_next(struct task_struct *task, unsigned int *fd); extern void __f_unlock_pos(struct file *); struct fd fdget(unsigned int fd); @@ -87,6 +82,7 @@ static inline void fdput_pos(struct fd f) DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd) DEFINE_CLASS(fd_raw, struct fd, fdput(_T), fdget_raw(fd), int fd) +DEFINE_CLASS(fd_pos, struct fd, fdput_pos(_T), fdget_pos(fd), int fd) extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); extern int replace_fd(unsigned fd, struct file *file, unsigned flags); diff --git a/include/linux/file_ref.h b/include/linux/file_ref.h new file mode 100644 index 000000000000..9b3a8d9b17ab --- /dev/null +++ b/include/linux/file_ref.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef _LINUX_FILE_REF_H +#define _LINUX_FILE_REF_H + +#include <linux/atomic.h> +#include <linux/preempt.h> +#include <linux/types.h> + +/* + * file_ref is a reference count implementation specifically for use by + * files. It takes inspiration from rcuref but differs in key aspects + * such as support for SLAB_TYPESAFE_BY_RCU type caches. 
+ * + * FILE_REF_ONEREF FILE_REF_MAXREF + * 0x0000000000000000UL 0x7FFFFFFFFFFFFFFFUL + * <-------------------valid -------------------> + * + * FILE_REF_SATURATED + * 0x8000000000000000UL 0xA000000000000000UL 0xBFFFFFFFFFFFFFFFUL + * <-----------------------saturation zone----------------------> + * + * FILE_REF_RELEASED FILE_REF_DEAD + * 0xC000000000000000UL 0xE000000000000000UL + * <-------------------dead zone-------------------> + * + * FILE_REF_NOREF + * 0xFFFFFFFFFFFFFFFFUL + */ + +#ifdef CONFIG_64BIT +#define FILE_REF_ONEREF 0x0000000000000000UL +#define FILE_REF_MAXREF 0x7FFFFFFFFFFFFFFFUL +#define FILE_REF_SATURATED 0xA000000000000000UL +#define FILE_REF_RELEASED 0xC000000000000000UL +#define FILE_REF_DEAD 0xE000000000000000UL +#define FILE_REF_NOREF 0xFFFFFFFFFFFFFFFFUL +#else +#define FILE_REF_ONEREF 0x00000000U +#define FILE_REF_MAXREF 0x7FFFFFFFU +#define FILE_REF_SATURATED 0xA0000000U +#define FILE_REF_RELEASED 0xC0000000U +#define FILE_REF_DEAD 0xE0000000U +#define FILE_REF_NOREF 0xFFFFFFFFU +#endif + +typedef struct { +#ifdef CONFIG_64BIT + atomic64_t refcnt; +#else + atomic_t refcnt; +#endif +} file_ref_t; + +/** + * file_ref_init - Initialize a file reference count + * @ref: Pointer to the reference count + * @cnt: The initial reference count, typically '1' + */ +static inline void file_ref_init(file_ref_t *ref, unsigned long cnt) +{ + atomic_long_set(&ref->refcnt, cnt - 1); +} + +bool __file_ref_put(file_ref_t *ref, unsigned long cnt); + +/** + * file_ref_get - Acquire one reference on a file + * @ref: Pointer to the reference count + * + * Similar to atomic_inc_not_zero() but saturates at FILE_REF_MAXREF. + * + * Provides full memory ordering. + * + * Return: False if the attempt to acquire a reference failed. This happens + * when the last reference has been put already. True if a reference + * was successfully acquired. + */ +static __always_inline __must_check bool file_ref_get(file_ref_t *ref) +{ + /* + * Unconditionally increase the reference count with full + * ordering. The saturation and dead zones provide enough + * tolerance for this. + * + * If this returns a negative value, the file in question has + * already been released and, due to SLAB_TYPESAFE_BY_RCU, may + * have been freed and immediately reused. + * Hence, unconditionally altering the file reference count, + * e.g., to reset it back to the middle of the dead zone, risks + * marking someone else's file as dead behind their back. + * + * It would be possible to do a careful: + * + * cnt = atomic_long_inc_return(&ref->refcnt); + * if (likely(cnt >= 0)) + * return true; + * + * and then something like: + * + * if (cnt >= FILE_REF_RELEASED) + * atomic_long_try_cmpxchg(&ref->refcnt, &cnt, FILE_REF_DEAD), + * + * to set the value back to the middle of the dead zone. But it's + * practically impossible to go from FILE_REF_DEAD to + * FILE_REF_ONEREF. It would need 2305843009213693952 (2^61) + * file_ref_get()s to resurrect such a dead file. + */ + return !atomic_long_add_negative(1, &ref->refcnt); +} + +/** + * file_ref_inc - Acquire one reference on a file + * @ref: Pointer to the reference count + * + * Acquire an additional reference on a file. Warns if the caller didn't + * already hold a reference.
+ */ +static __always_inline void file_ref_inc(file_ref_t *ref) +{ + long prior = atomic_long_fetch_inc_relaxed(&ref->refcnt); + WARN_ONCE(prior < 0, "file_ref_inc() on a released file reference"); +} + +/** + * file_ref_put - Release a file reference + * @ref: Pointer to the reference count + * + * Provides release memory ordering, such that prior loads and stores + * are done before, and provides an acquire ordering on success such + * that free() must come after. + * + * Return: True if this was the last reference with no future references + * possible. This signals the caller that it can safely release + * the object which is protected by the reference counter. + * False if there are still active references or the put() raced + * with a concurrent get()/put() pair. Caller is not allowed to + * release the protected object. + */ +static __always_inline __must_check bool file_ref_put(file_ref_t *ref) +{ + long cnt; + + /* + * While files are SLAB_TYPESAFE_BY_RCU and thus file_ref_put() + * calls don't risk UAFs when a file is recycled, it is still + * vulnerable to UAFs caused by freeing the whole slab page once + * it becomes unused. Preventing file_ref_put() from being + * preempted protects against this. + */ + guard(preempt)(); + /* + * Unconditionally decrease the reference count. The saturation + * and dead zones provide enough tolerance for this. If this + * fails then we need to handle the last reference drop and + * cases inside the saturation and dead zones. + */ + cnt = atomic_long_dec_return(&ref->refcnt); + if (cnt >= 0) + return false; + return __file_ref_put(ref, cnt); +} + +/** + * file_ref_read - Read the number of file references + * @ref: Pointer to the reference count + * + * Return: The number of held references (0 ... N) + */ +static inline unsigned long file_ref_read(file_ref_t *ref) +{ + unsigned long c = atomic_long_read(&ref->refcnt); + + /* Return 0 if within the DEAD zone. */ + return c >= FILE_REF_RELEASED ?
0 : c + 1; +} + +#endif diff --git a/include/linux/filelock.h b/include/linux/filelock.h index bb44224c6676..c412ded9171e 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -180,6 +180,11 @@ static inline void locks_wake_up(struct file_lock *fl) wake_up(&fl->c.flc_wait); } +static inline bool locks_can_async_lock(const struct file_operations *fops) +{ + return !fops->lock || fops->fop_flags & FOP_ASYNC_LOCK; +} + /* fs/locks.c */ void locks_free_lock_context(struct inode *inode); void locks_free_lock(struct file_lock *fl); diff --git a/include/linux/filter.h b/include/linux/filter.h index 7d7578a8eac1..3a21947f2fd4 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -1119,6 +1119,7 @@ bool bpf_jit_supports_exceptions(void); bool bpf_jit_supports_ptr_xchg(void); bool bpf_jit_supports_arena(void); bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena); +bool bpf_jit_supports_private_stack(void); u64 bpf_arch_uaddress_limit(void); void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie); bool bpf_helper_changes_pkt_data(void *func); diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h index 9f14976399ab..4621aec0328c 100644 --- a/include/linux/firmware/qcom/qcom_scm.h +++ b/include/linux/firmware/qcom/qcom_scm.h @@ -85,6 +85,8 @@ int qcom_scm_io_writel(phys_addr_t addr, unsigned int val); bool qcom_scm_restore_sec_cfg_available(void); int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare); +int qcom_scm_set_gpu_smmu_aperture(unsigned int context_bank); +bool qcom_scm_set_gpu_smmu_aperture_is_available(void); int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size); int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare); int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size); diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index d7d07afc0532..6d4dbc196b93 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -3,7 +3,7 @@ * Xilinx Zynq MPSoC Firmware layer * * Copyright (C) 2014-2021 Xilinx - * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc. + * Copyright (C) 2022 - 2024, Advanced Micro Devices, Inc. 
* * Michal Simek <michal.simek@amd.com> * Davorin Mista <davorin.mista@aggios.com> @@ -32,6 +32,19 @@ /* SMC SIP service Call Function Identifier Prefix */ #define PM_SIP_SVC 0xC2000000 +/* SMC function ID to get SiP SVC version */ +#define GET_SIP_SVC_VERSION (0x8200ff03U) + +/* SiP Service Calls version numbers */ +#define SIP_SVC_VERSION_MAJOR (0U) +#define SIP_SVC_VERSION_MINOR (2U) + +#define SIP_SVC_PASSTHROUGH_VERSION ((SIP_SVC_VERSION_MAJOR << 16) | \ + SIP_SVC_VERSION_MINOR) + +/* Fixed ID for FW specific APIs */ +#define PASS_THROUGH_FW_CMD_ID GENMASK(11, 0) + /* PM API versions */ #define PM_API_VERSION_1 1 #define PM_API_VERSION_2 2 @@ -51,6 +64,7 @@ #define API_ID_MASK GENMASK(7, 0) #define MODULE_ID_MASK GENMASK(11, 8) +#define PLM_MODULE_ID_MASK GENMASK(15, 8) /* Firmware feature check version mask */ #define FIRMWARE_VERSION_MASK 0xFFFFU @@ -62,7 +76,13 @@ #define GET_CALLBACK_DATA 0xa01 /* Number of 32bits values in payload */ -#define PAYLOAD_ARG_CNT 4U +#define PAYLOAD_ARG_CNT 7U + +/* Number of 64bits arguments for SMC call */ +#define SMC_ARG_CNT_64 8U + +/* Number of 32bits arguments for SMC call */ +#define SMC_ARG_CNT_32 13U /* Number of arguments for a callback */ #define CB_ARG_CNT 4 @@ -130,6 +150,7 @@ enum pm_module_id { PM_MODULE_ID = 0x0, + XPM_MODULE_ID = 0x2, XSEM_MODULE_ID = 0x3, TF_A_MODULE_ID = 0xa, }; @@ -218,9 +239,13 @@ enum pm_ioctl_id { /* Runtime feature configuration */ IOCTL_SET_FEATURE_CONFIG = 26, IOCTL_GET_FEATURE_CONFIG = 27, + /* IOCTL for Secure Read/Write Interface */ + IOCTL_READ_REG = 28, /* Dynamic SD/GEM configuration */ IOCTL_SET_SD_CONFIG = 30, IOCTL_SET_GEM_CONFIG = 31, + /* IOCTL to get default/current QoS */ + IOCTL_GET_QOS = 34, }; enum pm_query_id { @@ -238,6 +263,7 @@ enum pm_query_id { PM_QID_PINCTRL_GET_PIN_GROUPS = 11, PM_QID_CLOCK_GET_NUM_CLOCKS = 12, PM_QID_CLOCK_GET_MAX_DIVISOR = 13, + PM_QID_PINCTRL_GET_ATTRIBUTES = 15, }; enum rpu_oper_mode { @@ -533,6 +559,7 @@ struct zynqmp_pm_query_data { }; int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...); +int zynqmp_pm_invoke_fw_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...); #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) int zynqmp_pm_get_api_version(u32 *version); @@ -553,9 +580,9 @@ int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data); int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value); int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type); int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select); -int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, +int zynqmp_pm_reset_assert(const u32 reset, const enum zynqmp_pm_reset_action assert_flag); -int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status); +int zynqmp_pm_reset_get_status(const u32 reset, u32 *status); unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode); int zynqmp_pm_bootmode_write(u32 ps_mode); int zynqmp_pm_init_finalize(void); @@ -698,14 +725,13 @@ static inline int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select) return -ENODEV; } -static inline int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, +static inline int zynqmp_pm_reset_assert(const u32 reset, const enum zynqmp_pm_reset_action assert_flag) { return -ENODEV; } -static inline int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, - u32 *status) +static inline int zynqmp_pm_reset_get_status(const u32 reset, u32 *status) { return -ENODEV; } diff --git a/include/linux/fs.h b/include/linux/fs.h index 3559446279c1..7e29433c5ecc 100644 --- a/include/linux/fs.h +++ 
b/include/linux/fs.h @@ -45,6 +45,8 @@ #include <linux/slab.h> #include <linux/maple_tree.h> #include <linux/rw_hint.h> +#include <linux/file_ref.h> +#include <linux/unicode.h> #include <asm/byteorder.h> #include <uapi/linux/fs.h> @@ -623,6 +625,7 @@ is_uncached_acl(struct posix_acl *acl) #define IOP_NOFOLLOW 0x0004 #define IOP_XATTR 0x0008 #define IOP_DEFAULT_READLINK 0x0010 +#define IOP_MGTIME 0x0020 /* * Keep mostly read-only and often accessed (especially for @@ -1005,7 +1008,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) /** * struct file - Represents a file - * @f_count: reference count + * @f_ref: reference count * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context. * @f_mode: FMODE_* flags often used in hotpaths * @f_op: file operations @@ -1030,7 +1033,7 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.) */ struct file { - atomic_long_t f_count; + file_ref_t f_ref; spinlock_t f_lock; fmode_t f_mode; const struct file_operations *f_op; @@ -1078,15 +1081,14 @@ struct file_handle { static inline struct file *get_file(struct file *f) { - long prior = atomic_long_fetch_inc_relaxed(&f->f_count); - WARN_ONCE(!prior, "struct file::f_count incremented from zero; use-after-free condition present!\n"); + file_ref_inc(&f->f_ref); return f; } struct file *get_file_rcu(struct file __rcu **f); struct file *get_file_active(struct file **f); -#define file_count(x) atomic_long_read(&(x)->f_count) +#define file_count(f) file_ref_read(&(f)->f_ref) #define MAX_NON_LFS ((1UL<<31) - 1) @@ -1584,6 +1586,8 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb, struct timespec64 current_time(struct inode *inode); struct timespec64 inode_set_ctime_current(struct inode *inode); +struct timespec64 inode_set_ctime_deleg(struct inode *inode, + struct timespec64 update); static inline time64_t inode_get_atime_sec(const struct inode *inode) { @@ -1653,6 +1657,17 @@ static inline struct timespec64 inode_set_mtime(struct inode *inode, return inode_set_mtime_to_ts(inode, ts); } +/* + * Multigrain timestamps + * + * Conditionally use fine-grained ctime and mtime timestamps when there + * are users actively observing them via getattr. The primary use-case + * for this is NFS clients that use the ctime to distinguish between + * different states of the file, and that are often fooled by multiple + * operations that occur in the same coarse-grained timer tick. 
+ */ +#define I_CTIME_QUERIED ((u32)BIT(31)) + static inline time64_t inode_get_ctime_sec(const struct inode *inode) { return inode->i_ctime_sec; @@ -1660,7 +1675,7 @@ static inline time64_t inode_get_ctime_sec(const struct inode *inode) static inline long inode_get_ctime_nsec(const struct inode *inode) { - return inode->i_ctime_nsec; + return inode->i_ctime_nsec & ~I_CTIME_QUERIED; } static inline struct timespec64 inode_get_ctime(const struct inode *inode) @@ -1671,13 +1686,7 @@ static inline struct timespec64 inode_get_ctime(const struct inode *inode) return ts; } -static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode, - struct timespec64 ts) -{ - inode->i_ctime_sec = ts.tv_sec; - inode->i_ctime_nsec = ts.tv_nsec; - return ts; -} +struct timespec64 inode_set_ctime_to_ts(struct inode *inode, struct timespec64 ts); /** * inode_set_ctime - set the ctime in the inode @@ -2116,6 +2125,8 @@ struct file_operations { #define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4)) /* Treat loff_t as unsigned (e.g., /dev/mem) */ #define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5)) +/* Supports asynchronous lock callbacks */ +#define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6)) /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, @@ -2542,6 +2553,7 @@ struct file_system_type { #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ #define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */ #define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */ +#define FS_MGTIME 64 /* FS uses multigrain timestamps */ #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ int (*init_fs_context)(struct fs_context *); const struct fs_parameter_spec *parameters; @@ -2565,6 +2577,17 @@ struct file_system_type { #define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME) +/** + * is_mgtime: is this inode using multigrain timestamps + * @inode: inode to test for multigrain timestamps + * + * Return true if the inode uses multigrain timestamps, false otherwise. 
+ */ +static inline bool is_mgtime(const struct inode *inode) +{ + return inode->i_opflags & IOP_MGTIME; +} + extern struct dentry *mount_bdev(struct file_system_type *fs_type, int flags, const char *dev_name, void *data, int (*fill_super)(struct super_block *, void *, int)); @@ -2766,6 +2789,16 @@ extern struct filename *getname_flags(const char __user *, int); extern struct filename *getname_uflags(const char __user *, int); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); +extern struct filename *__getname_maybe_null(const char __user *); +static inline struct filename *getname_maybe_null(const char __user *name, int flags) +{ + if (!(flags & AT_EMPTY_PATH)) + return getname(name); + + if (!name) + return NULL; + return __getname_maybe_null(name); +} extern void putname(struct filename *name); extern int finish_open(struct file *file, struct dentry *dentry, @@ -3326,6 +3359,7 @@ extern void page_put_link(void *); extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); +void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode); void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *); void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); void generic_fill_statx_atomic_writes(struct kstat *stat, @@ -3456,6 +3490,54 @@ extern int generic_ci_match(const struct inode *parent, const struct qstr *folded_name, const u8 *de_name, u32 de_name_len); +#if IS_ENABLED(CONFIG_UNICODE) +int generic_ci_d_hash(const struct dentry *dentry, struct qstr *str); +int generic_ci_d_compare(const struct dentry *dentry, unsigned int len, + const char *str, const struct qstr *name); + +/** + * generic_ci_validate_strict_name - Check if a given name is suitable + * for a directory + * + * This function checks if the proposed filename is valid for the + * parent directory. That means that only valid UTF-8 filenames will be + * accepted for casefold directories from filesystems created with the + * strict encoding flag. That also means that any name will be + * accepted for directories that don't have casefold enabled, or + * aren't being strict with the encoding. + * + * @dir: inode of the directory where the new file will be created + * @name: name of the new file + * + * Return: + * * True: if the filename is suitable for this directory. It can be + * true if a given name is not suitable for a strict encoding + * directory, but the directory being used isn't strict + * * False if the filename isn't suitable for this directory. This only + * happens when a directory is casefolded and the filesystem is strict + * about its encoding.
+ */ +static inline bool generic_ci_validate_strict_name(struct inode *dir, struct qstr *name) +{ + if (!IS_CASEFOLDED(dir) || !sb_has_strict_encoding(dir->i_sb)) + return true; + + /* + * A casefold dir must have an encoding set, unless the filesystem + * is corrupted + */ + if (WARN_ON_ONCE(!dir->i_sb->s_encoding)) + return true; + + return !utf8_validate(dir->i_sb->s_encoding, name); +} +#else +static inline bool generic_ci_validate_strict_name(struct inode *dir, struct qstr *name) +{ + return true; +} +#endif + static inline bool sb_has_encoding(const struct super_block *sb) { #if IS_ENABLED(CONFIG_UNICODE) @@ -3726,6 +3808,6 @@ static inline bool vfs_empty_path(int dfd, const char __user *path) return !c; } -bool generic_atomic_write_valid(struct iov_iter *iter, loff_t pos); +int generic_atomic_write_valid(struct kiocb *iocb, struct iov_iter *iter); #endif /* _LINUX_FS_H */ diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index 6cf713a7e6c6..3cef566088fc 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -28,7 +28,8 @@ typedef int fs_param_type(struct p_log *, */ fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64, fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev, - fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid; + fs_param_is_path, fs_param_is_fd, fs_param_is_uid, fs_param_is_gid, + fs_param_is_file_or_string; /* * Specification of the type of value a parameter wants. @@ -133,6 +134,8 @@ static inline bool fs_validate_description(const char *name, #define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL) #define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL) #define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL) +#define fsparam_file_or_string(NAME, OPT) \ + __fsparam(fs_param_is_file_or_string, NAME, OPT, 0, NULL) #define fsparam_uid(NAME, OPT) __fsparam(fs_param_is_uid, NAME, OPT, 0, NULL) #define fsparam_gid(NAME, OPT) __fsparam(fs_param_is_gid, NAME, OPT, 0, NULL) diff --git a/include/linux/fsl/netc_global.h b/include/linux/fsl/netc_global.h new file mode 100644 index 000000000000..fdecca8c90f0 --- /dev/null +++ b/include/linux/fsl/netc_global.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */ +/* Copyright 2024 NXP + */ +#ifndef __NETC_GLOBAL_H +#define __NETC_GLOBAL_H + +#include <linux/io.h> + +static inline u32 netc_read(void __iomem *reg) +{ + return ioread32(reg); +} + +static inline void netc_write(void __iomem *reg, u32 val) +{ + iowrite32(val, reg); +} + +#endif diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index fd5e84d0ec47..aa9ddd1e4bb6 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -113,14 +113,54 @@ static inline int ftrace_mod_get_kallsym(unsigned int symnum, unsigned long *val #ifdef CONFIG_FUNCTION_TRACER -extern int ftrace_enabled; +#include <linux/ftrace_regs.h> -#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +extern int ftrace_enabled; +/** + * ftrace_regs - ftrace partial/optimal register set + * + * ftrace_regs represents a group of registers which is used at the + * function entry and exit. There are three types of registers. + * + * - Registers for passing the parameters to the callee, including the stack + * pointer. (e.g. rcx, rdx, rdi, rsi, r8, r9 and rsp on x86_64) + * - Registers for passing the return values to the caller. + * (e.g.
rax and rdx on x86_64) + * - Registers for hooking the function call and return, including the + * frame pointer (the frame pointer is architecture/config dependent) + * (e.g. rip, rbp and rsp for x86_64) + * + * Also, architecture dependent fields can be used for internal processing. + * (e.g. orig_ax on x86_64) + * + * On the function entry, those registers will be restored except for + * the stack pointer, so that the user can change the function parameters + * and instruction pointer (e.g. live patching.) + * On the function exit, only the registers which are used for return values + * are restored. + * + * NOTE: users *must not* access regs directly, only do it via APIs, because + * the member can be changed according to the architecture. + * This is why the structure is empty here, so that nothing accesses + * the ftrace_regs directly. + */ struct ftrace_regs { - struct pt_regs regs; + /* Nothing to see here, use the accessor functions! */ }; -#define arch_ftrace_get_regs(fregs) (&(fregs)->regs) + +#define ftrace_regs_size() sizeof(struct __arch_ftrace_regs) + +#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS +/* + * Architectures that define HAVE_DYNAMIC_FTRACE_WITH_ARGS must define their own + * arch_ftrace_get_regs() where it only returns pt_regs *if* it is fully + * populated. It should return NULL otherwise. + */ +static inline struct pt_regs *arch_ftrace_get_regs(struct ftrace_regs *fregs) +{ + return &arch_ftrace_regs(fregs)->regs; +} /* * ftrace_regs_set_instruction_pointer() is to be defined by the architecture @@ -150,23 +190,6 @@ static __always_inline bool ftrace_regs_has_args(struct ftrace_regs *fregs) return ftrace_get_regs(fregs) != NULL; } -#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS -#define ftrace_regs_get_instruction_pointer(fregs) \ - instruction_pointer(ftrace_get_regs(fregs)) -#define ftrace_regs_get_argument(fregs, n) \ - regs_get_kernel_argument(ftrace_get_regs(fregs), n) -#define ftrace_regs_get_stack_pointer(fregs) \ - kernel_stack_pointer(ftrace_get_regs(fregs)) -#define ftrace_regs_return_value(fregs) \ - regs_return_value(ftrace_get_regs(fregs)) -#define ftrace_regs_set_return_value(fregs, ret) \ - regs_set_return_value(ftrace_get_regs(fregs), ret) -#define ftrace_override_function_with_return(fregs) \ - override_function_with_return(ftrace_get_regs(fregs)) -#define ftrace_regs_query_register_offset(name) \ - regs_query_register_offset(name) -#endif - typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op, struct ftrace_regs *fregs); @@ -1015,6 +1038,17 @@ struct ftrace_graph_ent { } __packed; /* + * Structure that defines an entry function trace with retaddr. + * It's already packed but the attribute "packed" is needed + * to remove extra padding at the end. + */ +struct fgraph_retaddr_ent { + unsigned long func; /* Current function */ + int depth; + unsigned long retaddr; /* Return address */ +} __packed; + +/* * Structure that defines a return function trace. * It's already packed but the attribute "packed" is needed * to remove extra padding at the end.
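Since struct ftrace_regs is now opaque, tracing callbacks have to go through the accessor helpers rather than touching registers directly. A minimal sketch (not part of this patch; the callback and ops names are hypothetical, and reading arguments this way assumes an architecture that provides them, e.g. via HAVE_DYNAMIC_FTRACE_WITH_ARGS):

#include <linux/ftrace.h>

/* Hypothetical ftrace callback: inspect state only via the accessors. */
static void my_callback(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long pc = ftrace_regs_get_instruction_pointer(fregs);
	unsigned long arg0 = ftrace_regs_get_argument(fregs, 0);

	trace_printk("%ps called from %ps, pc=0x%lx arg0=0x%lx\n",
		     (void *)ip, (void *)parent_ip, pc, arg0);
}

static struct ftrace_ops my_ops = {
	.func = my_callback,
};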
@@ -1039,7 +1073,8 @@ typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *, typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *, struct fgraph_ops *); /* entry */ -extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, struct fgraph_ops *gops); +extern int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace, + struct fgraph_ops *gops); bool ftrace_pids_enabled(struct ftrace_ops *ops); #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -1055,6 +1090,7 @@ struct fgraph_ops { void *fgraph_reserve_data(int idx, int size_bytes); void *fgraph_retrieve_data(int idx, int *size_bytes); +void *fgraph_retrieve_parent_data(int idx, int *size_bytes, int depth); /* * Stack of return addresses for functions @@ -1064,10 +1100,6 @@ void *fgraph_retrieve_data(int idx, int *size_bytes); struct ftrace_ret_stack { unsigned long ret; unsigned long func; - unsigned long long calltime; -#ifdef CONFIG_FUNCTION_PROFILER - unsigned long long subtime; -#endif #ifdef HAVE_FUNCTION_GRAPH_FP_TEST unsigned long fp; #endif @@ -1087,6 +1119,7 @@ function_graph_enter(unsigned long ret, unsigned long func, struct ftrace_ret_stack * ftrace_graph_get_ret_stack(struct task_struct *task, int skip); +unsigned long ftrace_graph_top_ret_addr(struct task_struct *task); unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx, unsigned long ret, unsigned long *retp); diff --git a/include/linux/ftrace_regs.h b/include/linux/ftrace_regs.h new file mode 100644 index 000000000000..be1ed0c891d0 --- /dev/null +++ b/include/linux/ftrace_regs.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_FTRACE_REGS_H +#define _LINUX_FTRACE_REGS_H + +/* + * Archs that just copy pt_regs into ftrace_regs can use this default. + * If an architecture does not use pt_regs, it must define all the below + * accessor functions.
+ */ +#ifndef HAVE_ARCH_FTRACE_REGS +struct __arch_ftrace_regs { + struct pt_regs regs; +}; + +#define arch_ftrace_regs(fregs) ((struct __arch_ftrace_regs *)(fregs)) + +struct ftrace_regs; + +#define ftrace_regs_get_instruction_pointer(fregs) \ + instruction_pointer(&arch_ftrace_regs(fregs)->regs) +#define ftrace_regs_get_argument(fregs, n) \ + regs_get_kernel_argument(&arch_ftrace_regs(fregs)->regs, n) +#define ftrace_regs_get_stack_pointer(fregs) \ + kernel_stack_pointer(&arch_ftrace_regs(fregs)->regs) +#define ftrace_regs_get_return_value(fregs) \ + regs_return_value(&arch_ftrace_regs(fregs)->regs) +#define ftrace_regs_set_return_value(fregs, ret) \ + regs_set_return_value(&arch_ftrace_regs(fregs)->regs, ret) +#define ftrace_override_function_with_return(fregs) \ + override_function_with_return(&arch_ftrace_regs(fregs)->regs) +#define ftrace_regs_query_register_offset(name) \ + regs_query_register_offset(name) + +#endif /* HAVE_ARCH_FTRACE_REGS */ + +#endif /* _LINUX_FTRACE_REGS_H */ diff --git a/include/linux/gfp.h b/include/linux/gfp.h index a951de920e20..b0fe9f62d15b 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -306,7 +306,7 @@ struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order); struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid); struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct *vma, - unsigned long addr, bool hugepage); + unsigned long addr); #else static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order) { @@ -326,7 +326,7 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde { return folio_alloc_noprof(gfp, order); } -#define vma_alloc_folio_noprof(gfp, order, vma, addr, hugepage) \ +#define vma_alloc_folio_noprof(gfp, order, vma, addr) \ folio_alloc_noprof(gfp, order) #endif @@ -341,7 +341,7 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde static inline struct page *alloc_page_vma_noprof(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr, false); + struct folio *folio = vma_alloc_folio_noprof(gfp, 0, vma, addr); return &folio->page; } @@ -371,28 +371,6 @@ __meminit void *alloc_pages_exact_nid_noprof(int nid, size_t size, gfp_t gfp_mas extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); -struct page_frag_cache; -void page_frag_cache_drain(struct page_frag_cache *nc); -extern void __page_frag_cache_drain(struct page *page, unsigned int count); -void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz, - gfp_t gfp_mask, unsigned int align_mask); - -static inline void *page_frag_alloc_align(struct page_frag_cache *nc, - unsigned int fragsz, gfp_t gfp_mask, - unsigned int align) -{ - WARN_ON_ONCE(!is_power_of_2(align)); - return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align); -} - -static inline void *page_frag_alloc(struct page_frag_cache *nc, - unsigned int fragsz, gfp_t gfp_mask) -{ - return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u); -} - -extern void page_frag_free(void *addr); - #define __free_page(page) __free_pages((page), 0) #define free_page(addr) free_pages((addr), 0) diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 2d105be7bbc3..6270150f4e29 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -21,9 +21,6 @@ struct device; #define GPIOF_OUT_INIT_LOW ((0 
<< 0) | (0 << 1)) #define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1)) -/* Gpio pin is active-low */ -#define GPIOF_ACTIVE_LOW (1 << 2) - /** * struct gpio - a structure describing a GPIO with configuration * @gpio: the GPIO number diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h index 6730ee900ee1..8a03d9696b1c 100644 --- a/include/linux/hid-sensor-ids.h +++ b/include/linux/hid-sensor-ids.h @@ -30,6 +30,8 @@ #define HID_USAGE_SENSOR_PROX 0x200011 #define HID_USAGE_SENSOR_DATA_PRESENCE 0x2004b0 #define HID_USAGE_SENSOR_HUMAN_PRESENCE 0x2004b1 +#define HID_USAGE_SENSOR_HUMAN_PROXIMITY 0x2004b2 +#define HID_USAGE_SENSOR_HUMAN_ATTENTION 0x2004bd /* Pressure (200031) */ #define HID_USAGE_SENSOR_PRESSURE 0x200031 diff --git a/include/linux/hid.h b/include/linux/hid.h index 121d5b8bc867..d11e9c9a5f15 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -359,6 +359,7 @@ struct hid_item { * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP: * | @HID_QUIRK_HAVE_SPECIAL_DRIVER: * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE: + * | @HID_QUIRK_IGNORE_SPECIAL_DRIVER * | @HID_QUIRK_FULLSPEED_INTERVAL: * | @HID_QUIRK_NO_INIT_REPORTS: * | @HID_QUIRK_NO_IGNORE: @@ -384,6 +385,7 @@ struct hid_item { #define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19) #define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20) #define HID_QUIRK_NOINVERT BIT(21) +#define HID_QUIRK_IGNORE_SPECIAL_DRIVER BIT(22) #define HID_QUIRK_FULLSPEED_INTERVAL BIT(28) #define HID_QUIRK_NO_INIT_REPORTS BIT(29) #define HID_QUIRK_NO_IGNORE BIT(30) @@ -599,15 +601,17 @@ enum hid_battery_status { struct hid_driver; struct hid_ll_driver; -struct hid_device { /* device report descriptor */ - const __u8 *dev_rdesc; - unsigned dev_rsize; - const __u8 *rdesc; - unsigned rsize; +struct hid_device { + const __u8 *dev_rdesc; /* device report descriptor */ + const __u8 *bpf_rdesc; /* bpf modified report descriptor, if any */ + const __u8 *rdesc; /* currently used report descriptor */ + unsigned int dev_rsize; + unsigned int bpf_rsize; + unsigned int rsize; + unsigned int collection_size; /* Number of allocated hid_collections */ struct hid_collection *collection; /* List of HID collections */ - unsigned collection_size; /* Number of allocated hid_collections */ - unsigned maxcollection; /* Number of parsed collections */ - unsigned maxapplication; /* Number of applications */ + unsigned int maxcollection; /* Number of parsed collections */ + unsigned int maxapplication; /* Number of applications */ __u16 bus; /* BUS ID */ __u16 group; /* Report group */ __u32 vendor; /* Vendor ID */ @@ -974,7 +978,6 @@ const struct hid_device_id *hid_match_device(struct hid_device *hdev, struct hid_driver *hdrv); bool hid_compare_device_paths(struct hid_device *hdev_a, struct hid_device *hdev_b, char separator); -s32 hid_snto32(__u32 value, unsigned n); __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, unsigned offset, unsigned n); diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h index 6a47223e6460..a6876ab29004 100644 --- a/include/linux/hid_bpf.h +++ b/include/linux/hid_bpf.h @@ -212,7 +212,7 @@ int hid_bpf_connect_device(struct hid_device *hdev); void hid_bpf_disconnect_device(struct hid_device *hdev); void hid_bpf_destroy_device(struct hid_device *hid); int hid_bpf_device_init(struct hid_device *hid); -u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size); +const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size); #else /* CONFIG_HID_BPF */ static 
inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 *size, int interrupt, @@ -228,13 +228,8 @@ static inline int hid_bpf_connect_device(struct hid_device *hdev) { return 0; } static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {} static inline void hid_bpf_destroy_device(struct hid_device *hid) {} static inline int hid_bpf_device_init(struct hid_device *hid) { return 0; } -/* - * This specialized allocator has to be a macro for its allocations to be - * accounted separately (to have a separate alloc_tag). The typecast is - * intentional to enforce typesafety. - */ -#define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \ - ((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL)) +static inline const u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, + unsigned int *size) { return rdesc; } #endif /* CONFIG_HID_BPF */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 930a591b9b61..6e452bd8e7e3 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -224,13 +224,7 @@ static inline struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, unsigned long vaddr) { - struct folio *folio; - - folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr, false); - if (folio) - clear_user_highpage(&folio->page, vaddr); - - return folio; + return vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr); } #endif diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 9d7754ad5e9b..6dbd0d49628f 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -229,6 +229,12 @@ struct hisi_qm_status { struct hisi_qm; +enum acc_err_result { + ACC_ERR_NONE, + ACC_ERR_NEED_RESET, + ACC_ERR_RECOVERED, +}; + struct hisi_qm_err_info { char *acpi_rst; u32 msi_wr_port; @@ -257,9 +263,9 @@ struct hisi_qm_err_ini { void (*close_axi_master_ooo)(struct hisi_qm *qm); void (*open_sva_prefetch)(struct hisi_qm *qm); void (*close_sva_prefetch)(struct hisi_qm *qm); - void (*log_dev_hw_err)(struct hisi_qm *qm, u32 err_sts); void (*show_last_dfx_regs)(struct hisi_qm *qm); void (*err_info_init)(struct hisi_qm *qm); + enum acc_err_result (*get_err_result)(struct hisi_qm *qm); }; struct hisi_qm_cap_info { @@ -274,13 +280,25 @@ struct hisi_qm_cap_info { u32 v3_val; }; +struct hisi_qm_cap_query_info { + u32 type; + const char *name; + u32 offset; + u32 v1_val; + u32 v2_val; + u32 v3_val; +}; + struct hisi_qm_cap_record { u32 type; + const char *name; u32 cap_val; }; struct hisi_qm_cap_tables { + u32 qm_cap_size; struct hisi_qm_cap_record *qm_cap_table; + u32 dev_cap_size; struct hisi_qm_cap_record *dev_cap_table; }; @@ -436,37 +454,6 @@ struct hisi_qp { struct uacce_queue *uacce_q; }; -static inline int q_num_set(const char *val, const struct kernel_param *kp, - unsigned int device) -{ - struct pci_dev *pdev; - u32 n, q_num; - int ret; - - if (!val) - return -EINVAL; - - pdev = pci_get_device(PCI_VENDOR_ID_HUAWEI, device, NULL); - if (!pdev) { - q_num = min_t(u32, QM_QNUM_V1, QM_QNUM_V2); - pr_info("No device found currently, suppose queue number is %u\n", - q_num); - } else { - if (pdev->revision == QM_HW_V1) - q_num = QM_QNUM_V1; - else - q_num = QM_QNUM_V2; - - pci_dev_put(pdev); - } - - ret = kstrtou32(val, 10, &n); - if (ret || n < QM_MIN_QNUM || n > q_num) - return -EINVAL; - - return param_set_int(val, kp); -} - static inline int vfs_num_set(const char *val, const struct kernel_param *kp) { u32 n; @@ -526,6 +513,8 @@ static inline void 
hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_ mutex_unlock(&qm_list->lock); } +int hisi_qm_q_num_set(const char *val, const struct kernel_param *kp, + unsigned int device); int hisi_qm_init(struct hisi_qm *qm); void hisi_qm_uninit(struct hisi_qm *qm); int hisi_qm_start(struct hisi_qm *qm); @@ -583,6 +572,9 @@ void hisi_qm_regs_dump(struct seq_file *s, struct debugfs_regset32 *regset); u32 hisi_qm_get_hw_info(struct hisi_qm *qm, const struct hisi_qm_cap_info *info_table, u32 index, bool is_read); +u32 hisi_qm_get_cap_value(struct hisi_qm *qm, + const struct hisi_qm_cap_query_info *info_table, + u32 index, bool is_read); int hisi_qm_set_algs(struct hisi_qm *qm, u64 alg_msk, const struct qm_dev_alg *dev_algs, u32 dev_algs_size); diff --git a/include/linux/host1x.h b/include/linux/host1x.h index c4dde3aafcac..9fa9c30a34e6 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -14,12 +14,17 @@ enum host1x_class { HOST1X_CLASS_HOST1X = 0x1, + HOST1X_CLASS_NVJPG1 = 0x7, + HOST1X_CLASS_NVENC = 0x21, + HOST1X_CLASS_NVENC1 = 0x22, HOST1X_CLASS_GR2D = 0x51, HOST1X_CLASS_GR2D_SB = 0x52, HOST1X_CLASS_VIC = 0x5D, HOST1X_CLASS_GR3D = 0x60, + HOST1X_CLASS_NVJPG = 0xC0, HOST1X_CLASS_NVDEC = 0xF0, HOST1X_CLASS_NVDEC1 = 0xF5, + HOST1X_CLASS_OFA = 0xF8, }; struct host1x; diff --git a/include/linux/host1x_context_bus.h b/include/linux/host1x_context_bus.h index 72462737a6db..c928cb432680 100644 --- a/include/linux/host1x_context_bus.h +++ b/include/linux/host1x_context_bus.h @@ -9,7 +9,7 @@ #include <linux/device.h> #ifdef CONFIG_TEGRA_HOST1X_CONTEXT_BUS -extern struct bus_type host1x_context_device_bus_type; +extern const struct bus_type host1x_context_device_bus_type; #endif #endif diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index aa1e65ccb615..7ef5f7ef31a9 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -228,32 +228,17 @@ static inline void hrtimer_cancel_wait_running(struct hrtimer *timer) /* Initialize timers: */ extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock, enum hrtimer_mode mode); -extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, clockid_t clock_id, - enum hrtimer_mode mode); +extern void hrtimer_setup(struct hrtimer *timer, enum hrtimer_restart (*function)(struct hrtimer *), + clockid_t clock_id, enum hrtimer_mode mode); +extern void hrtimer_setup_on_stack(struct hrtimer *timer, + enum hrtimer_restart (*function)(struct hrtimer *), + clockid_t clock_id, enum hrtimer_mode mode); +extern void hrtimer_setup_sleeper_on_stack(struct hrtimer_sleeper *sl, clockid_t clock_id, + enum hrtimer_mode mode); #ifdef CONFIG_DEBUG_OBJECTS_TIMERS -extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock, - enum hrtimer_mode mode); -extern void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, - clockid_t clock_id, - enum hrtimer_mode mode); - extern void destroy_hrtimer_on_stack(struct hrtimer *timer); #else -static inline void hrtimer_init_on_stack(struct hrtimer *timer, - clockid_t which_clock, - enum hrtimer_mode mode) -{ - hrtimer_init(timer, which_clock, mode); -} - -static inline void hrtimer_init_sleeper_on_stack(struct hrtimer_sleeper *sl, - clockid_t clock_id, - enum hrtimer_mode mode) -{ - hrtimer_init_sleeper(sl, clock_id, mode); -} - static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { } #endif @@ -337,6 +322,28 @@ static inline int hrtimer_callback_running(struct hrtimer *timer) return timer->base->running == timer; } +/** + * hrtimer_update_function - 
Update the timer's callback function + * @timer: Timer to update + * @function: New callback function + * + * Only safe to call if the timer is not enqueued. Can be called in the callback function if the + * timer is not enqueued at the same time (see the comments above HRTIMER_STATE_ENQUEUED). + */ +static inline void hrtimer_update_function(struct hrtimer *timer, + enum hrtimer_restart (*function)(struct hrtimer *)) +{ + guard(raw_spinlock_irqsave)(&timer->base->cpu_base->lock); + + if (WARN_ON_ONCE(hrtimer_is_queued(timer))) + return; + + if (WARN_ON_ONCE(!function)) + return; + + timer->function = function; +} + /* Forward a hrtimer so it expires after now: */ extern u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index ef5b80e48599..b94c2e8ee918 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -2,7 +2,6 @@ #ifndef _LINUX_HUGE_MM_H #define _LINUX_HUGE_MM_H -#include <linux/sched/coredump.h> #include <linux/mm_types.h> #include <linux/fs.h> /* only for vma_is_dax() */ @@ -120,6 +119,8 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_ALLOC, MTHP_STAT_ANON_FAULT_FALLBACK, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, + MTHP_STAT_ZSWPOUT, + MTHP_STAT_SWPIN, MTHP_STAT_SWPOUT, MTHP_STAT_SWPOUT_FALLBACK, MTHP_STAT_SHMEM_ALLOC, @@ -253,19 +254,6 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, return orders; } -static inline bool file_thp_enabled(struct vm_area_struct *vma) -{ - struct inode *inode; - - if (!vma->vm_file) - return false; - - inode = vma->vm_file->f_inode; - - return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) && - !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode); -} - unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, unsigned long vm_flags, unsigned long tva_flags, diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index e4697539b665..ae4fe8615bb6 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -546,16 +546,10 @@ static inline struct hstate *hstate_inode(struct inode *i) } #endif /* !CONFIG_HUGETLBFS */ -#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA -unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); -#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ - unsigned long -generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags); /* * hugetlb page specific state flags.
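As an illustration of the new hrtimer interfaces above (a minimal sketch; the my_* names are hypothetical, not part of this diff):

	#include <linux/hrtimer.h>
	#include <linux/ktime.h>

	static struct hrtimer my_timer;

	/* Hypothetical callback, runs in hardirq context. */
	static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
	{
		return HRTIMER_NORESTART;
	}

	static void my_start(void)
	{
		/* hrtimer_setup() now takes the callback at init time. */
		hrtimer_setup(&my_timer, my_timer_fn, CLOCK_MONOTONIC,
			      HRTIMER_MODE_REL);
		hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL);

		/*
		 * Swapping the callback later must go through
		 * hrtimer_update_function(), which verifies that the
		 * timer is not enqueued.
		 */
	}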
These flags are located in page.private @@ -1035,9 +1029,19 @@ void hugetlb_unregister_node(struct node *node); */ bool is_raw_hwpoison_page_in_hugepage(struct page *page); +static inline unsigned long huge_page_mask_align(struct file *file) +{ + return PAGE_MASK & ~huge_page_mask(hstate_file(file)); +} + #else /* CONFIG_HUGETLB_PAGE */ struct hstate {}; +static inline unsigned long huge_page_mask_align(struct file *file) +{ + return 0; +} + static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio) { return NULL; diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index 5c6a421ad580..3a63dff62d03 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -368,7 +368,9 @@ enum hwmon_intrusion_attributes { /** * struct hwmon_ops - hwmon device operations - * @is_visible: Callback to return attribute visibility. Mandatory. + * @visible: Static visibility. If non-zero, 'is_visible' is ignored. + * @is_visible: Callback to return attribute visibility. Mandatory unless + * 'visible' is non-zero. * Parameters are: * @const void *drvdata: * Pointer to driver-private data structure passed @@ -412,6 +414,7 @@ enum hwmon_intrusion_attributes { * The function returns 0 on success or a negative error number. */ struct hwmon_ops { + umode_t visible; umode_t (*is_visible)(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int channel); int (*read)(struct device *dev, enum hwmon_sensor_types type, diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index 2a1ed05d5782..12d532b012c5 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -298,7 +298,9 @@ enum i3c_open_drain_speed { * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask - * + * @I3C_ADDR_SLOT_EXT_STATUS_MASK: address slot mask with extended information + * @I3C_ADDR_SLOT_EXT_DESIRED: the bitmask represents addresses that are preferred by some devices, + * such as the "assigned-address" property in a device tree source. * On an I3C bus, addresses are assigned dynamically, and we need to know which * addresses are free to use and which ones are already assigned. * @@ -311,8 +313,12 @@ enum i3c_addr_slot_status { I3C_ADDR_SLOT_I2C_DEV, I3C_ADDR_SLOT_I3C_DEV, I3C_ADDR_SLOT_STATUS_MASK = 3, + I3C_ADDR_SLOT_EXT_STATUS_MASK = 7, + I3C_ADDR_SLOT_EXT_DESIRED = BIT(2), }; +#define I3C_ADDR_SLOT_STATUS_BITS 4 + /** * struct i3c_bus - I3C bus object * @cur_master: I3C master currently driving the bus. 
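A sketch of how the new hwmon_ops::visible field from the hwmon.h hunk above could be used when every attribute shares one static mode (driver names are hypothetical):

	#include <linux/hwmon.h>

	static int my_read(struct device *dev, enum hwmon_sensor_types type,
			   u32 attr, int channel, long *val)
	{
		*val = 25000;	/* 25 degrees C, in millidegrees */
		return 0;
	}

	static const struct hwmon_ops my_hwmon_ops = {
		/*
		 * All attributes are world-readable; with a non-zero
		 * .visible the .is_visible callback can be omitted.
		 */
		.visible = 0444,
		.read = my_read,
	};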
Since I3C is multi-master @@ -354,7 +360,7 @@ enum i3c_addr_slot_status { struct i3c_bus { struct i3c_dev_desc *cur_master; int id; - unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG]; + unsigned long addrslots[((I2C_MAX_ADDR + 1) * I3C_ADDR_SLOT_STATUS_BITS) / BITS_PER_LONG]; enum i3c_bus_mode mode; struct { unsigned long i3c; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 456bca45ff05..05dedc45505c 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1445,6 +1445,8 @@ struct ieee80211_mgmt { __le16 status; __le16 capab; __le16 timeout; + /* followed by BA Extension */ + u8 variable[]; } __packed addba_resp; struct{ u8 action_code; diff --git a/include/linux/if_ltalk.h b/include/linux/if_ltalk.h deleted file mode 100644 index 4cc1c0b77870..000000000000 --- a/include/linux/if_ltalk.h +++ /dev/null @@ -1,8 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __LINUX_LTALK_H -#define __LINUX_LTALK_H - -#include <uapi/linux/if_ltalk.h> - -extern struct net_device *alloc_ltalkdev(int sizeof_priv); -#endif diff --git a/include/linux/iio/backend.h b/include/linux/iio/backend.h index 37d56914d485..10be00f3b120 100644 --- a/include/linux/iio/backend.h +++ b/include/linux/iio/backend.h @@ -14,12 +14,14 @@ struct iio_dev; enum iio_backend_data_type { IIO_BACKEND_TWOS_COMPLEMENT, IIO_BACKEND_OFFSET_BINARY, + IIO_BACKEND_DATA_UNSIGNED, IIO_BACKEND_DATA_TYPE_MAX }; enum iio_backend_data_source { IIO_BACKEND_INTERNAL_CONTINUOUS_WAVE, IIO_BACKEND_EXTERNAL, + IIO_BACKEND_INTERNAL_RAMP_16BIT, IIO_BACKEND_DATA_SOURCE_MAX }; @@ -89,6 +91,11 @@ enum iio_backend_sample_trigger { * @read_raw: Read a channel attribute from a backend device * @debugfs_print_chan_status: Print channel status into a buffer. * @debugfs_reg_access: Read or write register value of backend. + * @ddr_enable: Enable interface DDR (Double Data Rate) mode. + * @ddr_disable: Disable interface DDR (Double Data Rate) mode. + * @data_stream_enable: Enable data stream. + * @data_stream_disable: Disable data stream. + * @data_transfer_addr: Set data address. 
**/ struct iio_backend_ops { int (*enable)(struct iio_backend *back); @@ -129,6 +136,11 @@ struct iio_backend_ops { size_t len); int (*debugfs_reg_access)(struct iio_backend *back, unsigned int reg, unsigned int writeval, unsigned int *readval); + int (*ddr_enable)(struct iio_backend *back); + int (*ddr_disable)(struct iio_backend *back); + int (*data_stream_enable)(struct iio_backend *back); + int (*data_stream_disable)(struct iio_backend *back); + int (*data_transfer_addr)(struct iio_backend *back, u32 address); }; /** @@ -164,6 +176,11 @@ int iio_backend_data_sample_trigger(struct iio_backend *back, int devm_iio_backend_request_buffer(struct device *dev, struct iio_backend *back, struct iio_dev *indio_dev); +int iio_backend_ddr_enable(struct iio_backend *back); +int iio_backend_ddr_disable(struct iio_backend *back); +int iio_backend_data_stream_enable(struct iio_backend *back); +int iio_backend_data_stream_disable(struct iio_backend *back); +int iio_backend_data_transfer_addr(struct iio_backend *back, u32 address); ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private, const struct iio_chan_spec *chan, const char *buf, size_t len); diff --git a/include/linux/iio/driver.h b/include/linux/iio/driver.h index 7a157ed218f6..7f8b55551ed0 100644 --- a/include/linux/iio/driver.h +++ b/include/linux/iio/driver.h @@ -18,7 +18,7 @@ struct iio_map; * @map: array of mappings specifying association of channel with client */ int iio_map_array_register(struct iio_dev *indio_dev, - struct iio_map *map); + const struct iio_map *map); /** * iio_map_array_unregister() - tell the core to remove consumer mappings for @@ -38,6 +38,7 @@ int iio_map_array_unregister(struct iio_dev *indio_dev); * handle de-registration of the IIO map object when the device's refcount goes to * zero. */ -int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, struct iio_map *maps); +int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev, + const struct iio_map *maps); #endif diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h index a4558c45a548..72062a0c7c87 100644 --- a/include/linux/iio/events.h +++ b/include/linux/iio/events.h @@ -10,7 +10,7 @@ #include <uapi/linux/iio/events.h> /** - * IIO_EVENT_CODE() - create event identifier + * _IIO_EVENT_CODE() - create event identifier * @chan_type: Type of the channel. Should be one of enum iio_chan_type. * @diff: Whether the event is for a differential channel or not. * @modifier: Modifier for the channel. Should be one of enum iio_modifier. @@ -19,10 +19,13 @@ * @chan: Channel number for non-differential channels. * @chan1: First channel number for differential channels. * @chan2: Second channel number for differential channels. + * + * Drivers should use the specialized macros below instead of using this one + * directly. */ -#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \ - type, chan, chan1, chan2) \ +#define _IIO_EVENT_CODE(chan_type, diff, modifier, direction, \ + type, chan, chan1, chan2) \ (((u64)type << 56) | ((u64)diff << 55) | \ ((u64)direction << 48) | ((u64)modifier << 40) | \ ((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \ @@ -30,7 +33,8 @@ /** - * IIO_MOD_EVENT_CODE() - create event identifier for modified channels + * IIO_MOD_EVENT_CODE() - create event identifier for modified (non + * differential) channels * @chan_type: Type of the channel. Should be one of enum iio_chan_type. * @number: Channel number. * @modifier: Modifier for the channel.
Should be one of enum iio_modifier. @@ -40,10 +44,11 @@ #define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \ type, direction) \ - IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0) + _IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0) /** - * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels + * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified (non + * differential) channels * @chan_type: Type of the channel. Should be one of enum iio_chan_type. * @number: Channel number. * @type: Type of the event. Should be one of enum iio_event_type. @@ -51,6 +56,18 @@ */ #define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \ - IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0) + _IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0) + +/** + * IIO_DIFF_EVENT_CODE() - create event identifier for differential channels + * @chan_type: Type of the channel. Should be one of enum iio_chan_type. + * @chan1: First channel number for differential channels. + * @chan2: Second channel number for differential channels. + * @type: Type of the event. Should be one of enum iio_event_type. + * @direction: Direction of the event. One of enum iio_event_direction. + */ + +#define IIO_DIFF_EVENT_CODE(chan_type, chan1, chan2, type, direction) \ + _IIO_EVENT_CODE(chan_type, 1, 0, direction, type, 0, chan1, chan2) #endif diff --git a/include/linux/iio/iio-opaque.h b/include/linux/iio/iio-opaque.h index 5aec3945555b..a89e7e43e441 100644 --- a/include/linux/iio/iio-opaque.h +++ b/include/linux/iio/iio-opaque.h @@ -70,7 +70,7 @@ struct iio_dev_opaque { #if defined(CONFIG_DEBUG_FS) struct dentry *debugfs_dentry; - unsigned cached_reg_addr; + unsigned int cached_reg_addr; char read_buf[20]; unsigned int read_buf_len; #endif diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index 18779b631e90..ae65890d4567 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -282,11 +282,11 @@ struct iio_chan_spec { const struct iio_chan_spec_ext_info *ext_info; const char *extend_name; const char *datasheet_name; - unsigned modified:1; - unsigned indexed:1; - unsigned output:1; - unsigned differential:1; - unsigned has_ext_scan_type:1; + unsigned int modified:1; + unsigned int indexed:1; + unsigned int output:1; + unsigned int differential:1; + unsigned int has_ext_scan_type:1; }; @@ -514,7 +514,7 @@ struct iio_info { const struct iio_chan_spec *chan, enum iio_event_type type, enum iio_event_direction dir, - int state); + bool state); int (*read_event_value)(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, @@ -541,13 +541,13 @@ struct iio_info { int (*update_scan_mode)(struct iio_dev *indio_dev, const unsigned long *scan_mask); int (*debugfs_reg_access)(struct iio_dev *indio_dev, - unsigned reg, unsigned writeval, - unsigned *readval); + unsigned int reg, unsigned int writeval, + unsigned int *readval); int (*fwnode_xlate)(struct iio_dev *indio_dev, const struct fwnode_reference_args *iiospec); - int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val); + int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned int val); int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev, - unsigned count); + unsigned int count); }; /** @@ -609,7 +609,7 @@ struct iio_dev { int scan_bytes; const unsigned long *available_scan_masks; - unsigned __private masklength; + unsigned int __private masklength; const unsigned long *active_scan_mask; bool scan_timestamp; struct iio_trigger *trig; @@ 
-624,7 +624,7 @@ struct iio_dev { const struct iio_info *info; const struct iio_buffer_setup_ops *setup_ops; - void *priv; + void *__private priv; }; int iio_device_id(struct iio_dev *indio_dev); @@ -785,7 +785,7 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv); /* The information at the returned address is guaranteed to be cacheline aligned */ static inline void *iio_priv(const struct iio_dev *indio_dev) { - return indio_dev->priv; + return ACCESS_PRIVATE(indio_dev, priv); } void iio_device_free(struct iio_dev *indio_dev); @@ -831,6 +831,7 @@ int iio_device_resume_triggering(struct iio_dev *indio_dev); bool iio_read_acpi_mount_matrix(struct device *dev, struct iio_mount_matrix *orientation, char *acpi_method); +const char *iio_get_acpi_device_name_and_data(struct device *dev, const void **data); #else static inline bool iio_read_acpi_mount_matrix(struct device *dev, struct iio_mount_matrix *orientation, @@ -838,7 +839,16 @@ static inline bool iio_read_acpi_mount_matrix(struct device *dev, { return false; } +static inline const char * +iio_get_acpi_device_name_and_data(struct device *dev, const void **data) +{ + return NULL; +} #endif +static inline const char *iio_get_acpi_device_name(struct device *dev) +{ + return iio_get_acpi_device_name_and_data(dev, NULL); +} /** * iio_get_current_scan_type - Get the current scan type for a channel diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index cb5280e6cc21..5730ba6b1cfa 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -141,7 +141,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev) ARP_EVICT_NOCARRIER) struct in_ifaddr { - struct hlist_node hash; + struct hlist_node addr_lst; struct in_ifaddr __rcu *ifa_next; struct in_device *ifa_dev; struct rcu_head rcu_head; @@ -226,6 +226,10 @@ static __inline__ bool bad_mask(__be32 mask, __be32 addr) for (ifa = rtnl_dereference((in_dev)->ifa_list); ifa; \ ifa = rtnl_dereference(ifa->ifa_next)) +#define in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev) \ + for (ifa = rtnl_net_dereference(net, (in_dev)->ifa_list); ifa; \ + ifa = rtnl_net_dereference(net, ifa->ifa_next)) + #define in_dev_for_each_ifa_rcu(ifa, in_dev) \ for (ifa = rcu_dereference((in_dev)->ifa_list); ifa; \ ifa = rcu_dereference(ifa->ifa_next)) @@ -252,6 +256,11 @@ static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) return rtnl_dereference(dev->ip_ptr); } +static inline struct in_device *__in_dev_get_rtnl_net(const struct net_device *dev) +{ + return rtnl_net_dereference(dev_net(dev), dev->ip_ptr); +} + /* called with rcu_read_lock or rtnl held */ static inline bool ip_ignore_linkdown(const struct net_device *dev) { diff --git a/include/linux/input.h b/include/linux/input.h index cd866b020a01..7d7cb0593a63 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -286,6 +286,10 @@ struct input_handle; * @start: starts handler for given handle. This function is called by * input core right after connect() method and also when a process * that "grabbed" a device releases it + * @passive_observer: set to %true by drivers only interested in observing + * data stream from devices if there are other users present. 
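A sketch of walking a device's IPv4 addresses with the new per-netns RTNL helpers from the inetdevice.h hunk above (assumes the caller holds the per-netns RTNL lock; my_dump_addrs is a hypothetical name):

	#include <linux/inetdevice.h>
	#include <linux/netdevice.h>
	#include <linux/printk.h>

	static void my_dump_addrs(struct net *net, struct net_device *dev)
	{
		struct in_device *in_dev = __in_dev_get_rtnl_net(dev);
		struct in_ifaddr *ifa;

		if (!in_dev)
			return;

		/* Caller must hold the per-netns RTNL lock for @net. */
		in_dev_for_each_ifa_rtnl_net(net, ifa, in_dev)
			pr_info("%pI4/%d\n", &ifa->ifa_address,
				ifa->ifa_prefixlen);
	}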
Such + * drivers will not result in starting underlying hardware device + * when input_open_device() is called for their handles * @legacy_minors: set to %true by drivers using legacy minor ranges * @minor: beginning of range of 32 legacy minors for devices this driver * can provide @@ -321,6 +325,7 @@ struct input_handler { void (*disconnect)(struct input_handle *handle); void (*start)(struct input_handle *handle); + bool passive_observer; bool legacy_minors; int minor; const char *name; diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h index 11ee185566c3..b94beab64610 100644 --- a/include/linux/intel_vsec.h +++ b/include/linux/intel_vsec.h @@ -74,10 +74,11 @@ enum intel_vsec_quirks { * @pdev: PCI device reference for the callback's use * @guid: ID of data to acccss * @data: buffer for the data to be copied + * @off: offset into the requested buffer * @count: size of buffer */ struct pmt_callbacks { - int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, u32 count); + int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, loff_t off, u32 count); }; /** diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 457151f9f263..8cd9327e4e78 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -616,6 +616,53 @@ extern void __raise_softirq_irqoff(unsigned int nr); extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); +/* + * With forced-threaded interrupts enabled a raised softirq is deferred to + * ksoftirqd unless it can be handled within the threaded interrupt. This + * affects timer_list timers and hrtimers which are explicitly marked with + * HRTIMER_MODE_SOFT. + * With PREEMPT_RT enabled more hrtimers are moved to softirq for processing + * which includes all timers which are not explicitly marked HRTIMER_MODE_HARD. + * Userspace controlled timers (like the clock_nanosleep() interface) is divided + * into two categories: Tasks with elevated scheduling policy including + * SCHED_{FIFO|RR|DL} and the remaining scheduling policy. The tasks with the + * elevated scheduling policy are woken up directly from the HARDIRQ while all + * other wake ups are delayed to softirq and so to ksoftirqd. + * + * The ksoftirqd runs at SCHED_OTHER policy at which it should remain since it + * handles the softirq in an overloaded situation (not handled everything + * within its last run). + * If the timers are handled at SCHED_OTHER priority then they competes with all + * other SCHED_OTHER tasks for CPU resources are possibly delayed. + * Moving timers softirqs to a low priority SCHED_FIFO thread instead ensures + * that timer are performed before scheduling any SCHED_OTHER thread. 
+ */ +DECLARE_PER_CPU(struct task_struct *, ktimerd); +DECLARE_PER_CPU(unsigned long, pending_timer_softirq); +void raise_ktimers_thread(unsigned int nr); + +static inline unsigned int local_timers_pending_force_th(void) +{ + return __this_cpu_read(pending_timer_softirq); +} + +static inline void raise_timer_softirq(unsigned int nr) +{ + lockdep_assert_in_irq(); + if (force_irqthreads()) + raise_ktimers_thread(nr); + else + __raise_softirq_irqoff(nr); +} + +static inline unsigned int local_timers_pending(void) +{ + if (force_irqthreads()) + return local_timers_pending_force_th(); + else + return local_softirq_pending(); +} + DECLARE_PER_CPU(struct task_struct *, ksoftirqd); static inline struct task_struct *this_cpu_ksoftirqd(void) diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index b1ecfc3cd5bc..ce86b09ae80f 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -87,6 +87,7 @@ struct io_pgtable_cfg { * attributes set in the TCR for a non-coherent page-table walker. * * IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable. + * IO_PGTABLE_QUIRK_ARM_S2FWB: Use the FWB format for the MemAttrs bits */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) @@ -95,6 +96,7 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) #define IO_PGTABLE_QUIRK_ARM_HD BIT(7) + #define IO_PGTABLE_QUIRK_ARM_S2FWB BIT(8) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index c189d36ad55e..578a3fdf5c71 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -110,7 +110,7 @@ static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd, static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd) { - return cmd_to_io_kiocb(cmd)->task; + return cmd_to_io_kiocb(cmd)->tctx->task; } #endif /* _LINUX_IO_URING_CMD_H */ diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 4b9ba523978d..011860ade268 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -37,6 +37,7 @@ enum io_uring_cmd_flags { /* set when uring wants to cancel a previously issued command */ IO_URING_F_CANCEL = (1 << 11), IO_URING_F_COMPAT = (1 << 12), + IO_URING_F_TASK_DEAD = (1 << 13), }; struct io_wq_work_node { @@ -55,19 +56,18 @@ struct io_wq_work { int cancel_seq; }; -struct io_fixed_file { - /* file * with additional FFS_* flags */ - unsigned long file_ptr; +struct io_rsrc_data { + unsigned int nr; + struct io_rsrc_node **nodes; }; struct io_file_table { - struct io_fixed_file *files; + struct io_rsrc_data data; unsigned long *bitmap; unsigned int alloc_hint; }; struct io_hash_bucket { - spinlock_t lock; struct hlist_head list; } ____cacheline_aligned_in_smp; @@ -76,6 +76,12 @@ struct io_hash_table { unsigned hash_bits; }; +struct io_mapped_region { + struct page **pages; + void *vmap_ptr; + size_t nr_pages; +}; + /* * Arbitrary limit, can be raised if need be */ @@ -85,6 +91,7 @@ struct io_uring_task { /* submission side */ int cached_refs; const struct io_ring_ctx *last; + struct task_struct *task; struct io_wq *io_wq; struct file *registered_rings[IO_RINGFD_REG_MAX]; @@ -270,7 +277,6 @@ struct io_ring_ctx { * Fixed resources fast path, should be accessed only under * uring_lock, and updated through io_uring_register(2) */ - struct io_rsrc_node *rsrc_node; atomic_t cancel_seq; /* @@ -283,15 
+289,13 @@ struct io_ring_ctx { struct io_wq_work_list iopoll_list; struct io_file_table file_table; - struct io_mapped_ubuf **user_bufs; - unsigned nr_user_files; - unsigned nr_user_bufs; + struct io_rsrc_data buf_table; struct io_submit_state submit_state; struct xarray io_bl_xa; - struct io_hash_table cancel_table_locked; + struct io_hash_table cancel_table; struct io_alloc_cache apoll_cache; struct io_alloc_cache netmsg_cache; struct io_alloc_cache rw_cache; @@ -302,6 +306,11 @@ struct io_ring_ctx { * ->uring_cmd() by io_uring_cmd_insert_cancelable() */ struct hlist_head cancelable_uring_cmd; + /* + * For Hybrid IOPOLL, runtime in hybrid polling, without + * scheduling time + */ + u64 hybrid_poll_time; } ____cacheline_aligned_in_smp; struct { @@ -316,6 +325,9 @@ struct io_ring_ctx { unsigned cq_entries; struct io_ev_fd __rcu *io_ev_fd; unsigned cq_extra; + + void *cq_wait_arg; + size_t cq_wait_size; } ____cacheline_aligned_in_smp; /* @@ -324,6 +336,7 @@ struct io_ring_ctx { */ struct { struct llist_head work_llist; + struct llist_head retry_llist; unsigned long check_cq; atomic_t cq_wait_nr; atomic_t cq_timeouts; @@ -342,7 +355,6 @@ struct io_ring_ctx { struct list_head io_buffers_comp; struct list_head cq_overflow_list; - struct io_hash_table cancel_table; struct hlist_head waitid_list; @@ -366,16 +378,6 @@ struct io_ring_ctx { struct wait_queue_head poll_wq; struct io_restriction restrictions; - /* slow path rsrc auxilary data, used by update/register */ - struct io_rsrc_data *file_data; - struct io_rsrc_data *buf_data; - - /* protected by ->uring_lock */ - struct list_head rsrc_ref_list; - struct io_alloc_cache rsrc_node_cache; - struct wait_queue_head rsrc_quiesce_wq; - unsigned rsrc_quiesce; - u32 pers_next; struct xarray personalities; @@ -409,7 +411,7 @@ struct io_ring_ctx { /* napi busy poll default timeout */ ktime_t napi_busy_poll_dt; bool napi_prefer_busy_poll; - bool napi_enabled; + u8 napi_track_mode; DECLARE_HASHTABLE(napi_ht, 4); #endif @@ -418,6 +420,13 @@ struct io_ring_ctx { unsigned evfd_last_cq_tail; /* + * Protection for resize vs mmap races - both the mmap and resize + * side will need to grab this lock, to prevent either side from + * being run concurrently with the other. + */ + struct mutex resize_lock; + + /* * If IORING_SETUP_NO_MMAP is used, then the below holds * the gup'ed pages for the two rings, and the sqes. 
*/ @@ -425,6 +434,9 @@ struct io_ring_ctx { unsigned short n_sqe_pages; struct page **ring_pages; struct page **sqe_pages; + + /* used for optimised request parameter and wait argument passing */ + struct io_mapped_region param_region; }; struct io_tw_state { @@ -447,6 +459,7 @@ enum { REQ_F_LINK_TIMEOUT_BIT, REQ_F_NEED_CLEANUP_BIT, REQ_F_POLLED_BIT, + REQ_F_HYBRID_IOPOLL_STATE_BIT, REQ_F_BUFFER_SELECTED_BIT, REQ_F_BUFFER_RING_BIT, REQ_F_REISSUE_BIT, @@ -459,7 +472,6 @@ enum { REQ_F_DOUBLE_POLL_BIT, REQ_F_APOLL_MULTISHOT_BIT, REQ_F_CLEAR_POLLIN_BIT, - REQ_F_HASH_LOCKED_BIT, /* keep async read/write and isreg together and in order */ REQ_F_SUPPORT_NOWAIT_BIT, REQ_F_ISREG_BIT, @@ -468,6 +480,7 @@ enum { REQ_F_BL_EMPTY_BIT, REQ_F_BL_NO_RECYCLE_BIT, REQ_F_BUFFERS_COMMIT_BIT, + REQ_F_BUF_NODE_BIT, /* not a real bit, just to check we're not overflowing the space */ __REQ_F_LAST_BIT, @@ -506,6 +519,8 @@ enum { REQ_F_NEED_CLEANUP = IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT), /* already went through poll handler */ REQ_F_POLLED = IO_REQ_FLAG(REQ_F_POLLED_BIT), + /* every req only blocks once in hybrid poll */ + REQ_F_IOPOLL_STATE = IO_REQ_FLAG(REQ_F_HYBRID_IOPOLL_STATE_BIT), /* buffer already selected */ REQ_F_BUFFER_SELECTED = IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT), /* buffer selected from ring, needs commit */ @@ -534,8 +549,6 @@ enum { REQ_F_APOLL_MULTISHOT = IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT), /* recvmsg special flag, clear EPOLLIN */ REQ_F_CLEAR_POLLIN = IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT), - /* hashed into ->cancel_hash_locked, protected by ->uring_lock */ - REQ_F_HASH_LOCKED = IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT), /* don't use lazy poll wake for this request */ REQ_F_POLL_NO_LAZY = IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT), /* file is pollable */ @@ -546,6 +559,8 @@ enum { REQ_F_BL_NO_RECYCLE = IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT), /* buffer ring head needs incrementing on put */ REQ_F_BUFFERS_COMMIT = IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT), + /* buf node is valid */ + REQ_F_BUF_NODE = IO_REQ_FLAG(REQ_F_BUF_NODE_BIT), }; typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts); @@ -615,12 +630,9 @@ struct io_kiocb { struct io_cqe cqe; struct io_ring_ctx *ctx; - struct task_struct *task; + struct io_uring_task *tctx; union { - /* store used ubuf, so we can prevent reloading */ - struct io_mapped_ubuf *imu; - /* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */ struct io_buffer *kbuf; @@ -629,6 +641,8 @@ struct io_kiocb { * REQ_F_BUFFER_RING is set. */ struct io_buffer_list *buf_list; + + struct io_rsrc_node *buf_node; }; union { @@ -638,13 +652,20 @@ struct io_kiocb { __poll_t apoll_events; }; - struct io_rsrc_node *rsrc_node; + struct io_rsrc_node *file_node; atomic_t refs; bool cancel_seq_set; struct io_task_work io_task_work; - /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ - struct hlist_node hash_node; + union { + /* + * for polled requests, i.e. 
IORING_OP_POLL_ADD and async armed + * poll + */ + struct hlist_node hash_node; + /* For IOPOLL setup queues, with hybrid polling */ + u64 iopoll_start; + }; /* internal polling, see IORING_FEAT_FAST_POLL */ struct async_poll *apoll; /* opcode allocated if it needs to store data for async defer */ @@ -667,4 +688,9 @@ struct io_overflow_cqe { struct io_uring_cqe cqe; }; +static inline bool io_ctx_cqe32(struct io_ring_ctx *ctx) +{ + return ctx->flags & IORING_SETUP_CQE32; +} + #endif diff --git a/include/linux/iomap.h b/include/linux/iomap.h index f61407e3b121..5675af6b740c 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -53,6 +53,9 @@ struct vm_fault; * * IOMAP_F_XATTR indicates that the iomap is for an extended attribute extent * rather than a file data extent. + * + * IOMAP_F_BOUNDARY indicates that I/O and I/O completions for this iomap must + * never be merged with the mapping before it. */ #define IOMAP_F_NEW (1U << 0) #define IOMAP_F_DIRTY (1U << 1) @@ -64,6 +67,7 @@ struct vm_fault; #define IOMAP_F_BUFFER_HEAD 0 #endif /* CONFIG_BUFFER_HEAD */ #define IOMAP_F_XATTR (1U << 5) +#define IOMAP_F_BOUNDARY (1U << 6) /* * Flags set by the core iomap code during operations: @@ -178,6 +182,7 @@ struct iomap_folio_ops { #else #define IOMAP_DAX 0 #endif /* CONFIG_FS_DAX */ +#define IOMAP_ATOMIC (1 << 9) struct iomap_ops { /* diff --git a/include/linux/iommu.h b/include/linux/iommu.h index bd722f473635..318d27841130 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -42,6 +42,8 @@ struct notifier_block; struct iommu_sva; struct iommu_dma_cookie; struct iommu_fault_param; +struct iommufd_ctx; +struct iommufd_viommu; #define IOMMU_FAULT_PERM_READ (1 << 0) /* read */ #define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */ @@ -491,7 +493,9 @@ static inline int __iommu_copy_struct_from_user_array( * @index: Index to the location in the array to copy user data from * @min_last: The last member of the data structure @kdst points in the * initial version. - * Return 0 for success, otherwise -error. + * + * Copy a single entry from a user array. Return 0 for success, otherwise + * -error. */ #define iommu_copy_struct_from_user_array(kdst, user_array, data_type, index, \ min_last) \ @@ -500,6 +504,50 @@ static inline int __iommu_copy_struct_from_user_array( offsetofend(typeof(*(kdst)), min_last)) /** + * iommu_copy_struct_from_full_user_array - Copy iommu driver specific user + * space data from an iommu_user_data_array + * @kdst: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @kdst_entry_size: sizeof(*kdst) + * @user_array: Pointer to a struct iommu_user_data_array for a user space + * array + * @data_type: The data type of the @kdst. Must match with @user_array->type + * + * Copy the entire user array. kdst must have room for kdst_entry_size * + * user_array->entry_num bytes. Return 0 for success, otherwise -error. 
+ */ +static inline int +iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size, + struct iommu_user_data_array *user_array, + unsigned int data_type) +{ + unsigned int i; + int ret; + + if (user_array->type != data_type) + return -EINVAL; + if (!user_array->entry_num) + return -EINVAL; + if (likely(user_array->entry_len == kdst_entry_size)) { + if (copy_from_user(kdst, user_array->uptr, + user_array->entry_num * + user_array->entry_len)) + return -EFAULT; + } + + /* Copy item by item */ + for (i = 0; i != user_array->entry_num; i++) { + ret = copy_struct_from_user( + kdst + kdst_entry_size * i, kdst_entry_size, + user_array->uptr + user_array->entry_len * i, + user_array->entry_len); + if (ret) + return ret; + } + return 0; +} + +/** * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @hw_info: report iommu hardware information. The data buffer returned by this @@ -509,19 +557,19 @@ static inline int __iommu_copy_struct_from_user_array( * @domain_alloc: allocate and return an iommu domain if success. Otherwise * NULL is returned. The domain is not fully initialized until * the caller iommu_domain_alloc() returns. - * @domain_alloc_user: Allocate an iommu domain corresponding to the input - * parameters as defined in include/uapi/linux/iommufd.h. - * Unlike @domain_alloc, it is called only by IOMMUFD and - * must fully initialize the new domain before return. - * Upon success, if the @user_data is valid and the @parent - * points to a kernel-managed domain, the new domain must be - * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be - * NULL while the @user_data can be optionally provided, the - * new domain must support __IOMMU_DOMAIN_PAGING. - * Upon failure, ERR_PTR must be returned. + * @domain_alloc_paging_flags: Allocate an iommu domain corresponding to the + * input parameters as defined in + * include/uapi/linux/iommufd.h. The @user_data can be + * optionally provided, the new domain must support + * __IOMMU_DOMAIN_PAGING. Upon failure, ERR_PTR must be + * returned. * @domain_alloc_paging: Allocate an iommu_domain that can be used for - * UNMANAGED, DMA, and DMA_FQ domain types. + * UNMANAGED, DMA, and DMA_FQ domain types. This is the + * same as invoking domain_alloc_paging_flags() with + * @flags=0, @user_data=NULL. A driver should implement + * only one of the two ops. * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing. + * @domain_alloc_nested: Allocate an iommu_domain for nested translation. * @probe_device: Add device to iommu driver handling * @release_device: Remove device from iommu driver handling * @probe_finalize: Do final setup work after the device is added to an IOMMU @@ -542,6 +590,14 @@ static inline int __iommu_copy_struct_from_user_array( * @remove_dev_pasid: Remove any translation configurations of a specific * pasid, so that any DMA transactions with this pasid * will be blocked by the hardware. + * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind + * the @dev, as the set of virtualization resources shared/passed + * to user space IOMMU instance. And associate it with a nesting + * @parent_domain. The @viommu_type must be defined in the header + * include/uapi/linux/iommufd.h + * It is required to call iommufd_viommu_alloc() helper for + * a bundled allocation of the core and the driver structures, + * using the given @ictx pointer. 
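A sketch of a driver-side caller of iommu_copy_struct_from_full_user_array() as completed above (the entry layout and MY_INV_DATA_TYPE constant are hypothetical placeholders for a driver's uapi definitions):

	#include <linux/iommu.h>
	#include <linux/slab.h>

	/* Hypothetical per-entry invalidation request layout. */
	struct my_inv_entry {
		__u64 addr;
		__u32 flags;
		__u32 __reserved;
	};

	#define MY_INV_DATA_TYPE 1	/* placeholder for a uapi data-type value */

	static int my_invalidate(struct iommu_user_data_array *array)
	{
		struct my_inv_entry *ents;
		int ret;

		ents = kcalloc(array->entry_num, sizeof(*ents), GFP_KERNEL);
		if (!ents)
			return -ENOMEM;

		/*
		 * The helper copies the whole user array; entries of a
		 * different size are pulled in item by item.
		 */
		ret = iommu_copy_struct_from_full_user_array(ents, sizeof(*ents),
							     array,
							     MY_INV_DATA_TYPE);
		kfree(ents);
		return ret;
	}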
* @pgsize_bitmap: bitmap of all possible supported page sizes * @owner: Driver module providing these ops * @identity_domain: An always available, always attachable identity @@ -562,12 +618,15 @@ struct iommu_ops { /* Domain allocation and freeing by the iommu driver */ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); - struct iommu_domain *(*domain_alloc_user)( - struct device *dev, u32 flags, struct iommu_domain *parent, + struct iommu_domain *(*domain_alloc_paging_flags)( + struct device *dev, u32 flags, const struct iommu_user_data *user_data); struct iommu_domain *(*domain_alloc_paging)(struct device *dev); struct iommu_domain *(*domain_alloc_sva)(struct device *dev, struct mm_struct *mm); + struct iommu_domain *(*domain_alloc_nested)( + struct device *dev, struct iommu_domain *parent, u32 flags, + const struct iommu_user_data *user_data); struct iommu_device *(*probe_device)(struct device *dev); void (*release_device)(struct device *dev); @@ -591,6 +650,10 @@ struct iommu_ops { void (*remove_dev_pasid)(struct device *dev, ioasid_t pasid, struct iommu_domain *domain); + struct iommufd_viommu *(*viommu_alloc)( + struct device *dev, struct iommu_domain *parent_domain, + struct iommufd_ctx *ictx, unsigned int viommu_type); + const struct iommu_domain_ops *default_domain_ops; unsigned long pgsize_bitmap; struct module *owner; @@ -616,7 +679,8 @@ struct iommu_ops { * * EBUSY - device is attached to a domain and cannot be changed * * ENODEV - device specific errors, not able to be attached * * <others> - treated as ENODEV by the caller. Use is discouraged - * @set_dev_pasid: set an iommu domain to a pasid of device + * @set_dev_pasid: set or replace an iommu domain to a pasid of device. The pasid of + * the device should be left in the old config in error case. * @map_pages: map a physically contiguous set of pages of the same size to * an iommu domain. * @unmap_pages: unmap a number of pages of the same size from an iommu domain @@ -635,14 +699,13 @@ struct iommu_ops { * @enforce_cache_coherency: Prevent any kind of DMA from bypassing IOMMU_CACHE, * including no-snoop TLPs on PCIe or other platform * specific mechanisms. - * @enable_nesting: Enable nesting * @set_pgtable_quirks: Set io page table quirks (IO_PGTABLE_QUIRK_*) * @free: Release the domain after use. 
*/ struct iommu_domain_ops { int (*attach_dev)(struct iommu_domain *domain, struct device *dev); int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev, - ioasid_t pasid); + ioasid_t pasid, struct iommu_domain *old); int (*map_pages)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, @@ -663,7 +726,6 @@ struct iommu_domain_ops { dma_addr_t iova); bool (*enforce_cache_coherency)(struct iommu_domain *domain); - int (*enable_nesting)(struct iommu_domain *domain); int (*set_pgtable_quirks)(struct iommu_domain *domain, unsigned long quirks); @@ -784,12 +846,13 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather) }; } -extern int bus_iommu_probe(const struct bus_type *bus); -extern bool iommu_present(const struct bus_type *bus); extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap); extern bool iommu_group_has_isolated_msi(struct iommu_group *group); -extern struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus); -struct iommu_domain *iommu_paging_domain_alloc(struct device *dev); +struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, unsigned int flags); +static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) +{ + return iommu_paging_domain_alloc_flags(dev, 0); +} extern void iommu_domain_free(struct iommu_domain *domain); extern int iommu_attach_device(struct iommu_domain *domain, struct device *dev); @@ -844,7 +907,6 @@ extern void iommu_group_put(struct iommu_group *group); extern int iommu_group_id(struct iommu_group *group); extern struct iommu_domain *iommu_group_default_domain(struct iommu_group *); -int iommu_enable_nesting(struct iommu_domain *domain); int iommu_set_pgtable_quirks(struct iommu_domain *domain, unsigned long quirks); @@ -994,6 +1056,8 @@ struct iommu_fwspec { /* ATS is supported */ #define IOMMU_FWSPEC_PCI_RC_ATS (1 << 0) +/* CANWBS is supported */ +#define IOMMU_FWSPEC_PCI_RC_CANWBS (1 << 1) /* * An iommu attach handle represents a relationship between an iommu domain @@ -1081,19 +1145,15 @@ struct iommu_iotlb_gather {}; struct iommu_dirty_bitmap {}; struct iommu_dirty_ops {}; -static inline bool iommu_present(const struct bus_type *bus) -{ - return false; -} - static inline bool device_iommu_capable(struct device *dev, enum iommu_cap cap) { return false; } -static inline struct iommu_domain *iommu_domain_alloc(const struct bus_type *bus) +static inline struct iommu_domain *iommu_paging_domain_alloc_flags(struct device *dev, + unsigned int flags) { - return NULL; + return ERR_PTR(-ENODEV); } static inline struct iommu_domain *iommu_paging_domain_alloc(struct device *dev) diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h index 30f832a60ccb..11110c749200 100644 --- a/include/linux/iommufd.h +++ b/include/linux/iommufd.h @@ -8,16 +8,46 @@ #include <linux/err.h> #include <linux/errno.h> +#include <linux/refcount.h> #include <linux/types.h> +#include <linux/xarray.h> struct device; struct file; struct iommu_group; +struct iommu_user_data; +struct iommu_user_data_array; struct iommufd_access; struct iommufd_ctx; struct iommufd_device; +struct iommufd_viommu_ops; struct page; +enum iommufd_object_type { + IOMMUFD_OBJ_NONE, + IOMMUFD_OBJ_ANY = IOMMUFD_OBJ_NONE, + IOMMUFD_OBJ_DEVICE, + IOMMUFD_OBJ_HWPT_PAGING, + IOMMUFD_OBJ_HWPT_NESTED, + IOMMUFD_OBJ_IOAS, + IOMMUFD_OBJ_ACCESS, + IOMMUFD_OBJ_FAULT, + IOMMUFD_OBJ_VIOMMU, + IOMMUFD_OBJ_VDEVICE, +#ifdef CONFIG_IOMMUFD_TEST + IOMMUFD_OBJ_SELFTEST, +#endif 
+ IOMMUFD_OBJ_MAX, }; + +/* Base struct for all objects with a userspace ID handle. */ +struct iommufd_object { + refcount_t shortterm_users; + refcount_t users; + enum iommufd_object_type type; + unsigned int id; +}; + struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx, struct device *dev, u32 *id); void iommufd_device_unbind(struct iommufd_device *idev); @@ -54,6 +84,45 @@ void iommufd_access_detach(struct iommufd_access *access); void iommufd_ctx_get(struct iommufd_ctx *ictx); +struct iommufd_viommu { + struct iommufd_object obj; + struct iommufd_ctx *ictx; + struct iommu_device *iommu_dev; + struct iommufd_hwpt_paging *hwpt; + + const struct iommufd_viommu_ops *ops; + + struct xarray vdevs; + + unsigned int type; +}; + +/** + * struct iommufd_viommu_ops - vIOMMU specific operations + * @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory + * of the vIOMMU will be freed by the iommufd core after calling this op + * @alloc_domain_nested: Allocate an IOMMU_DOMAIN_NESTED on a vIOMMU that holds a + * nesting parent domain (IOMMU_DOMAIN_PAGING). @user_data + * must be defined in include/uapi/linux/iommufd.h. + * It must fully initialize the new iommu_domain before + * returning. Upon failure, ERR_PTR must be returned. + * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for + * any IOMMU hardware specific cache: TLB and device cache. + * The @array passes in the cache invalidation requests, in + * the form of a driver data structure. A driver must update the + * array->entry_num to report the number of handled requests. + * The data structure of the array entry must be defined in + * include/uapi/linux/iommufd.h + */ +struct iommufd_viommu_ops { + void (*destroy)(struct iommufd_viommu *viommu); + struct iommu_domain *(*alloc_domain_nested)( + struct iommufd_viommu *viommu, u32 flags, + const struct iommu_user_data *user_data); + int (*cache_invalidate)(struct iommufd_viommu *viommu, + struct iommu_user_data_array *array); +}; + #if IS_ENABLED(CONFIG_IOMMUFD) struct iommufd_ctx *iommufd_ctx_from_file(struct file *file); struct iommufd_ctx *iommufd_ctx_from_fd(int fd); @@ -111,4 +180,43 @@ static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx) return -EOPNOTSUPP; } #endif /* CONFIG_IOMMUFD */ + +#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE) +struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, + size_t size, + enum iommufd_object_type type); +struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu, + unsigned long vdev_id); +#else /* !CONFIG_IOMMUFD_DRIVER_CORE */ +static inline struct iommufd_object * +_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size, + enum iommufd_object_type type) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct device * +iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id) +{ + return NULL; +} +#endif /* CONFIG_IOMMUFD_DRIVER_CORE */ + +/* + * Helpers for IOMMU driver to allocate driver structures that will be freed by + * the iommufd core. The free op will be called prior to freeing the memory.
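A sketch of the driver side of these ops (struct and function names are hypothetical; allocation goes through the iommufd_viommu_alloc() helper defined just below):

	#include <linux/iommufd.h>

	/* Hypothetical driver wrapper; the core struct must come first. */
	struct my_viommu {
		struct iommufd_viommu core;
		/* driver-private state follows */
	};

	static void my_viommu_destroy(struct iommufd_viommu *viommu)
	{
		/* release driver state; the iommufd core frees the memory */
	}

	static int my_viommu_cache_invalidate(struct iommufd_viommu *viommu,
					      struct iommu_user_data_array *array)
	{
		/*
		 * Walk the user-provided requests, flush the hardware
		 * caches, then report the number of handled entries
		 * back through array->entry_num.
		 */
		return 0;
	}

	static const struct iommufd_viommu_ops my_viommu_ops = {
		.destroy = my_viommu_destroy,
		.cache_invalidate = my_viommu_cache_invalidate,
	};

The driver would then obtain its instance with iommufd_viommu_alloc(ictx, struct my_viommu, core, &my_viommu_ops), which allocates the bundled core and driver structures in one go.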
+ */ +#define iommufd_viommu_alloc(ictx, drv_struct, member, viommu_ops) \ + ({ \ + drv_struct *ret; \ + \ + static_assert(__same_type(struct iommufd_viommu, \ + ((drv_struct *)NULL)->member)); \ + static_assert(offsetof(drv_struct, member.obj) == 0); \ + ret = (drv_struct *)_iommufd_object_alloc( \ + ictx, sizeof(drv_struct), IOMMUFD_OBJ_VIOMMU); \ + if (!IS_ERR(ret)) \ + ret->member.ops = viommu_ops; \ + ret; \ + }) #endif diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 19a7b00baff4..91324c331a4b 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -19,19 +19,19 @@ * @op: accessor function (takes @args as its arguments) * @val: Variable to read the value into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * @sleep_before_read: if it is true, sleep @sleep_us before read. * @args: arguments for @op poll * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @args is stored in @val. Must not - * be called from atomic context if sleep_us or timeout_us are used. - * * When available, you'll probably want to use one of the specialized * macros defined below rather than this macro directly. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @args is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. */ #define read_poll_timeout(op, val, cond, sleep_us, timeout_us, \ sleep_before_read, args...) \ @@ -64,22 +64,22 @@ * @op: accessor function (takes @args as its arguments) * @val: Variable to read the value into * @cond: Break condition (usually involving @val) - * @delay_us: Time to udelay between reads in us (0 tight-loops). Should - * be less than ~10us since udelay is used (see - * Documentation/timers/timers-howto.rst). + * @delay_us: Time to udelay between reads in us (0 tight-loops). Please + * read udelay() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * @delay_before_read: if it is true, delay @delay_us before read. * @args: arguments for @op poll * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @args is stored in @val. - * * This macro does not rely on timekeeping. Hence it is safe to call even when * timekeeping is suspended, at the expense of an underestimation of wall clock * time, which is rather minimal with a non-zero delay_us. * * When available, you'll probably want to use one of the specialized * macros defined below rather than this macro directly. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @args is stored in @val. */ #define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \ delay_before_read, args...) \ @@ -119,17 +119,17 @@ * @addr: Address to poll * @val: Variable to read the value into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). 
+ * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @addr is stored in @val. Must not - * be called from atomic context if sleep_us or timeout_us are used. - * * When available, you'll probably want to use one of the specialized * macros defined below rather than this macro directly. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. Must not + * be called from atomic context if sleep_us or timeout_us are used. */ #define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ read_poll_timeout(op, val, cond, sleep_us, timeout_us, false, addr) @@ -140,16 +140,16 @@ * @addr: Address to poll * @val: Variable to read the value into * @cond: Break condition (usually involving @val) - * @delay_us: Time to udelay between reads in us (0 tight-loops). Should - * be less than ~10us since udelay is used (see - * Documentation/timers/timers-howto.rst). + * @delay_us: Time to udelay between reads in us (0 tight-loops). Please + * read udelay() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either - * case, the last read value at @addr is stored in @val. - * * When available, you'll probably want to use one of the specialized * macros defined below rather than this macro directly. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either + * case, the last read value at @addr is stored in @val. */ #define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \ read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, false, addr) diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 6e9fb667a1c5..5385349f0b8a 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -249,6 +249,38 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start); int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size); resource_size_t resource_alignment(struct resource *res); + +/** + * resource_set_size - Calculate resource end address from size and start + * @res: Resource descriptor + * @size: Size of the resource + * + * Calculate the end address for @res based on @size. + * + * Note: The start address of @res must be set when calling this function. + * Prefer resource_set_range() if setting both the start address and @size. + */ +static inline void resource_set_size(struct resource *res, resource_size_t size) +{ + res->end = res->start + size - 1; +} + +/** + * resource_set_range - Set resource start and end addresses + * @res: Resource descriptor + * @start: Start address for the resource + * @size: Size of the resource + * + * Set @res start address and calculate the end address based on @size. 
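For instance, resource_set_size() above and resource_set_range() completed just below replace the open-coded start/size arithmetic (a minimal sketch; the address is made up):

	#include <linux/ioport.h>
	#include <linux/sizes.h>

	static void my_fill_res(struct resource *res)
	{
		/* A 4 KiB MMIO window starting at 0xfed00000. */
		res->start = 0xfed00000;
		resource_set_size(res, SZ_4K);	/* res->end = 0xfed00fff */
	}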
+ */ +static inline void resource_set_range(struct resource *res, + resource_size_t start, + resource_size_t size) +{ + res->start = start; + resource_set_size(res, size); +} + static inline resource_size_t resource_size(const struct resource *res) { return res->end - res->start + 1; diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 3f003d5fde53..57b074e0cfbb 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -18,6 +18,8 @@ #include <asm/irqflags.h> #include <asm/percpu.h> +struct task_struct; + /* Currently lockdep_softirqs_on/off is used only by lockdep */ #ifdef CONFIG_PROVE_LOCKING extern void lockdep_softirqs_on(unsigned long ip); @@ -25,12 +27,16 @@ extern void lockdep_hardirqs_on_prepare(void); extern void lockdep_hardirqs_on(unsigned long ip); extern void lockdep_hardirqs_off(unsigned long ip); + extern void lockdep_cleanup_dead_cpu(unsigned int cpu, + struct task_struct *idle); #else static inline void lockdep_softirqs_on(unsigned long ip) { } static inline void lockdep_softirqs_off(unsigned long ip) { } static inline void lockdep_hardirqs_on_prepare(void) { } static inline void lockdep_hardirqs_on(unsigned long ip) { } static inline void lockdep_hardirqs_off(unsigned long ip) { } + static inline void lockdep_cleanup_dead_cpu(unsigned int cpu, + struct task_struct *idle) {} #endif #ifdef CONFIG_TRACE_IRQFLAGS diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 3496baa0b07f..e97206c721a0 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h @@ -5,30 +5,36 @@ #include <uapi/linux/irqnr.h> -extern int nr_irqs; +unsigned int irq_get_nr_irqs(void) __pure; +unsigned int irq_set_nr_irqs(unsigned int nr); extern struct irq_desc *irq_to_desc(unsigned int irq); unsigned int irq_get_next_irq(unsigned int offset); -# define for_each_irq_desc(irq, desc) \ - for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ - irq++, desc = irq_to_desc(irq)) \ - if (!desc) \ - ; \ - else - +#define for_each_irq_desc(irq, desc) \ + for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \ + __nr_irqs__ = 0) \ + for (irq = 0, desc = irq_to_desc(irq); irq < __nr_irqs__; \ + irq++, desc = irq_to_desc(irq)) \ + if (!desc) \ + ; \ + else # define for_each_irq_desc_reverse(irq, desc) \ - for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \ - irq--, desc = irq_to_desc(irq)) \ + for (irq = irq_get_nr_irqs() - 1, desc = irq_to_desc(irq); \ + irq >= 0; irq--, desc = irq_to_desc(irq)) \ if (!desc) \ ; \ else -# define for_each_active_irq(irq) \ - for (irq = irq_get_next_irq(0); irq < nr_irqs; \ - irq = irq_get_next_irq(irq + 1)) +#define for_each_active_irq(irq) \ + for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \ + __nr_irqs__ = 0) \ + for (irq = irq_get_next_irq(0); irq < __nr_irqs__; \ + irq = irq_get_next_irq(irq + 1)) -#define for_each_irq_nr(irq) \ - for (irq = 0; irq < nr_irqs; irq++) +#define for_each_irq_nr(irq) \ + for (unsigned int __nr_irqs__ = irq_get_nr_irqs(); __nr_irqs__; \ + __nr_irqs__ = 0) \ + for (irq = 0; irq < __nr_irqs__; irq++) #endif diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 8aef9bb6ad57..50f7ea8714bf 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1796,22 +1796,21 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal) static inline u32 jbd2_chksum(journal_t *journal, u32 crc, const void *address, unsigned int length) { - struct { - struct shash_desc shash; - char ctx[JBD_MAX_CHECKSUM_SIZE]; - } desc; + DEFINE_RAW_FLEX(struct shash_desc, desc, 
__ctx, + DIV_ROUND_UP(JBD_MAX_CHECKSUM_SIZE, + sizeof(*((struct shash_desc *)0)->__ctx))); int err; BUG_ON(crypto_shash_descsize(journal->j_chksum_driver) > JBD_MAX_CHECKSUM_SIZE); - desc.shash.tfm = journal->j_chksum_driver; - *(u32 *)desc.ctx = crc; + desc->tfm = journal->j_chksum_driver; + *(u32 *)desc->__ctx = crc; - err = crypto_shash_update(&desc.shash, address, length); + err = crypto_shash_update(desc, address, length); BUG_ON(err); - return *(u32 *)desc.ctx; + return *(u32 *)desc->__ctx; } /* Return most recent uncommitted transaction */ diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 1220f0fbe5bf..ed945f42e064 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -502,7 +502,7 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m) * - all other values are converted to jiffies by either multiplying * the input value by a factor or dividing it with a factor and * handling any 32-bit overflows. - * for the details see __msecs_to_jiffies() + * for the details see _msecs_to_jiffies() * * msecs_to_jiffies() checks for the passed in value being a constant * via __builtin_constant_p() allowing gcc to eliminate most of the @@ -526,6 +526,19 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m) } } +/** + * secs_to_jiffies: - convert seconds to jiffies + * @_secs: time in seconds + * + * Conversion is done by simple multiplication with HZ + * + * secs_to_jiffies() is defined as a macro rather than a static inline + * function so it can be used in static initializers. + * + * Return: jiffies value + */ +#define secs_to_jiffies(_secs) ((_secs) * HZ) + extern unsigned long __usecs_to_jiffies(const unsigned int u); #if !(USEC_PER_SEC % HZ) static inline unsigned long _usecs_to_jiffies(const unsigned int u) diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 00a3bf7c0d8f..6bbfc8aa42e8 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -29,6 +29,9 @@ typedef unsigned int __bitwise kasan_vmalloc_flags_t; #define KASAN_VMALLOC_VM_ALLOC ((__force kasan_vmalloc_flags_t)0x02u) #define KASAN_VMALLOC_PROT_NORMAL ((__force kasan_vmalloc_flags_t)0x04u) +#define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply existing page range */ +#define KASAN_VMALLOC_TLB_FLUSH 0x2 /* TLB flush */ + #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) #include <linux/pgtable.h> @@ -564,7 +567,8 @@ void kasan_populate_early_vm_area_shadow(void *start, unsigned long size); int kasan_populate_vmalloc(unsigned long addr, unsigned long size); void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, - unsigned long free_region_end); + unsigned long free_region_end, + unsigned long flags); #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ @@ -579,7 +583,8 @@ static inline int kasan_populate_vmalloc(unsigned long start, static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, - unsigned long free_region_end) { } + unsigned long free_region_end, + unsigned long flags) { } #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ @@ -614,7 +619,8 @@ static inline int kasan_populate_vmalloc(unsigned long start, static inline void kasan_release_vmalloc(unsigned long start, unsigned long end, unsigned long free_region_start, - unsigned long free_region_end) { } + unsigned long free_region_end, + unsigned long flags) { } static inline void *kasan_unpoison_vmalloc(const void *start, unsigned long size, diff --git
a/include/linux/kfifo.h b/include/linux/kfifo.h index 564868bdce89..fd743d4c4b4b 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h @@ -37,7 +37,6 @@ */ #include <linux/array_size.h> -#include <linux/dma-mapping.h> #include <linux/spinlock.h> #include <linux/stddef.h> #include <linux/types.h> diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index 30baae91b225..1f46046080f5 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -2,8 +2,6 @@ #ifndef _LINUX_KHUGEPAGED_H #define _LINUX_KHUGEPAGED_H -#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ - extern unsigned int khugepaged_max_ptes_none __read_mostly; #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern struct attribute_group khugepaged_attr_group; diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h index 6a3cd1bf4680..93a73c076d16 100644 --- a/include/linux/kmemleak.h +++ b/include/linux/kmemleak.h @@ -26,6 +26,7 @@ extern void kmemleak_free_part(const void *ptr, size_t size) __ref; extern void kmemleak_free_percpu(const void __percpu *ptr) __ref; extern void kmemleak_update_trace(const void *ptr) __ref; extern void kmemleak_not_leak(const void *ptr) __ref; +extern void kmemleak_transient_leak(const void *ptr) __ref; extern void kmemleak_ignore(const void *ptr) __ref; extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref; extern void kmemleak_no_scan(const void *ptr) __ref; @@ -93,6 +94,9 @@ static inline void kmemleak_update_trace(const void *ptr) static inline void kmemleak_not_leak(const void *ptr) { } +static inline void kmemleak_transient_leak(const void *ptr) +{ +} static inline void kmemleak_ignore(const void *ptr) { } diff --git a/include/linux/ksm.h b/include/linux/ksm.h index ec9c05044d4f..6a53ac4885bb 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -13,7 +13,6 @@ #include <linux/pagemap.h> #include <linux/rmap.h> #include <linux/sched.h> -#include <linux/sched/coredump.h> #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, @@ -91,7 +90,7 @@ struct folio *ksm_might_need_to_copy(struct folio *folio, void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc); void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); -void collect_procs_ksm(struct folio *folio, struct page *page, +void collect_procs_ksm(const struct folio *folio, const struct page *page, struct list_head *to_kill, int force_early); long ksm_process_profit(struct mm_struct *); @@ -123,8 +122,9 @@ static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte) { } -static inline void collect_procs_ksm(struct folio *folio, struct page *page, - struct list_head *to_kill, int force_early) +static inline void collect_procs_ksm(const struct folio *folio, + const struct page *page, struct list_head *to_kill, + int force_early) { } diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 45be36e5285f..401439bb21e3 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -97,6 +97,7 @@ #define KVM_PFN_ERR_HWPOISON (KVM_PFN_ERR_MASK + 1) #define KVM_PFN_ERR_RO_FAULT (KVM_PFN_ERR_MASK + 2) #define KVM_PFN_ERR_SIGPENDING (KVM_PFN_ERR_MASK + 3) +#define KVM_PFN_ERR_NEEDS_IO (KVM_PFN_ERR_MASK + 4) /* * error pfns indicate that the gfn is in slot but failed to @@ -153,13 +154,6 @@ static inline bool kvm_is_error_gpa(gpa_t gpa) return gpa == INVALID_GPA; } -#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT)) - -static inline bool is_error_page(struct page *page) -{ - return IS_ERR(page); -}
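A sketch of how the new kmemleak_transient_leak() hook above might be used, assuming the semantics implied by its name and its no-op stub: the object is only transiently unreferenced from scannable kernel memory, so a single scan cycle should not report it. queue_to_device() and len are hypothetical:

	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Hand the buffer to hardware via a channel kmemleak cannot scan. */
	kmemleak_transient_leak(buf);
	queue_to_device(buf);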
- #define KVM_REQUEST_MASK GENMASK(7,0) #define KVM_REQUEST_NO_WAKEUP BIT(8) #define KVM_REQUEST_WAIT BIT(9) @@ -219,6 +213,7 @@ enum kvm_bus { KVM_PIO_BUS, KVM_VIRTIO_CCW_NOTIFY_BUS, KVM_FAST_MMIO_BUS, + KVM_IOCSR_BUS, KVM_NR_BUSES }; @@ -279,21 +274,19 @@ enum { READING_SHADOW_PAGE_TABLES, }; -#define KVM_UNMAPPED_PAGE ((void *) 0x500 + POISON_POINTER_DELTA) - struct kvm_host_map { /* * Only valid if the 'pfn' is managed by the host kernel (i.e. There is * a 'struct page' for it. When using mem= kernel parameter some memory * can be used as guest memory but they are not managed by host * kernel). - * If 'pfn' is not managed by the host kernel, this field is - * initialized to KVM_UNMAPPED_PAGE. */ + struct page *pinned_page; struct page *page; void *hva; kvm_pfn_t pfn; kvm_pfn_t gfn; + bool writable; }; /* @@ -342,7 +335,8 @@ struct kvm_vcpu { #ifndef __KVM_HAVE_ARCH_WQP struct rcuwait wait; #endif - struct pid __rcu *pid; + struct pid *pid; + rwlock_t pid_lock; int sigset_active; sigset_t sigset; unsigned int halt_poll_ns; @@ -1176,6 +1170,10 @@ static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_ kvm_memslot_iter_is_valid(iter, end); \ kvm_memslot_iter_next(iter)) +struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); +struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); +struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); + /* * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations: * - create a new memory slot @@ -1214,33 +1212,70 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm); void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot); -int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn, - struct page **pages, int nr_pages); +int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn, + struct page **pages, int nr_pages); + +struct page *__gfn_to_page(struct kvm *kvm, gfn_t gfn, bool write); +static inline struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) +{ + return __gfn_to_page(kvm, gfn, true); +} -struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable); unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn); unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn, bool *writable); + +static inline void kvm_release_page_unused(struct page *page) +{ + if (!page) + return; + + put_page(page); +} + void kvm_release_page_clean(struct page *page); void kvm_release_page_dirty(struct page *page); -kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); -kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, - bool *writable); -kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn); -kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn); -kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn, - bool atomic, bool interruptible, bool *async, - bool write_fault, bool *writable, hva_t *hva); - -void kvm_release_pfn_clean(kvm_pfn_t pfn); -void kvm_release_pfn_dirty(kvm_pfn_t pfn); -void kvm_set_pfn_dirty(kvm_pfn_t pfn); -void kvm_set_pfn_accessed(kvm_pfn_t pfn); - -void kvm_release_pfn(kvm_pfn_t pfn, bool dirty); +static inline void kvm_release_faultin_page(struct kvm *kvm, struct page *page, + bool unused, bool dirty) +{ + lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused); + + if (!page) + 
return; + + /* + * If the page that KVM got from the *primary MMU* is writable, and KVM + * installed or reused a SPTE, mark the page/folio dirty. Note, this + * may mark a folio dirty even if KVM created a read-only SPTE, e.g. if + * the GFN is write-protected. Folios can't be safely marked dirty + * outside of mmu_lock as doing so could race with writeback on the + * folio. As a result, KVM can't mark folios dirty in the fast page + * fault handler, and so KVM must (somewhat) speculatively mark the + * folio dirty if KVM could locklessly make the SPTE writable. + */ + if (unused) + kvm_release_page_unused(page); + else if (dirty) + kvm_release_page_dirty(page); + else + kvm_release_page_clean(page); +} + +kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn, + unsigned int foll, bool *writable, + struct page **refcounted_page); + +static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, + bool write, bool *writable, + struct page **refcounted_page) +{ + return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, + write ? FOLL_WRITE : 0, writable, refcounted_page); +} + int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, int len); int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); @@ -1304,17 +1339,28 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, }) int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); -struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn); void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn); void mark_page_dirty(struct kvm *kvm, gfn_t gfn); -struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu); -struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn); -int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map); -void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty); +int __kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map, + bool writable); +void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map); + +static inline int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, + struct kvm_host_map *map) +{ + return __kvm_vcpu_map(vcpu, gpa, map, true); +} + +static inline int kvm_vcpu_map_readonly(struct kvm_vcpu *vcpu, gpa_t gpa, + struct kvm_host_map *map) +{ + return __kvm_vcpu_map(vcpu, gpa, map, false); +} + unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn); unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable); int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset, @@ -1686,9 +1732,6 @@ void kvm_arch_sync_events(struct kvm *kvm); int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); -struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn); -bool kvm_is_zone_device_page(struct page *page); - struct kvm_irq_ack_notifier { struct hlist_node link; unsigned gsi; @@ -2382,12 +2425,6 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) } #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */ -typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data); - -int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, - uintptr_t data, const char *name, - struct task_struct 
**thread_ptr); - #ifdef CONFIG_KVM_XFER_TO_GUEST_WORK static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu) { @@ -2461,11 +2498,13 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn) #ifdef CONFIG_KVM_PRIVATE_MEM int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, - gfn_t gfn, kvm_pfn_t *pfn, int *max_order); + gfn_t gfn, kvm_pfn_t *pfn, struct page **page, + int *max_order); #else static inline int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn, - kvm_pfn_t *pfn, int *max_order) + kvm_pfn_t *pfn, struct page **page, + int *max_order) { KVM_BUG_ON(1, kvm); return -EIO; } diff --git a/include/linux/lcd.h b/include/linux/lcd.h index 68703a51dc53..c3ccdff4519a 100644 --- a/include/linux/lcd.h +++ b/include/linux/lcd.h @@ -12,7 +12,11 @@ #include <linux/device.h> #include <linux/mutex.h> #include <linux/notifier.h> -#include <linux/fb.h> + +#define LCD_POWER_ON (0) +#define LCD_POWER_REDUCED (1) // deprecated; don't use in new code +#define LCD_POWER_REDUCED_VSYNC_SUSPEND (2) // deprecated; don't use in new code +#define LCD_POWER_OFF (4) /* Notes on locking: * @@ -30,7 +34,6 @@ */ struct lcd_device; -struct fb_info; struct lcd_properties { /* The maximum value for contrast (read-only) */ @@ -47,11 +50,23 @@ struct lcd_ops { int (*get_contrast)(struct lcd_device *); /* Set LCD panel contrast */ int (*set_contrast)(struct lcd_device *, int contrast); - /* Set LCD panel mode (resolutions ...) */ - int (*set_mode)(struct lcd_device *, struct fb_videomode *); - /* Check if given framebuffer device is the one LCD is bound to; - return 0 if not, !=0 if it is. If NULL, lcd always matches the fb. */ - int (*check_fb)(struct lcd_device *, struct fb_info *); + + /* + * Set LCD panel mode (resolutions ...) + */ + int (*set_mode)(struct lcd_device *lcd, u32 xres, u32 yres); + + /* + * Check if the LCD controls the given display device. This + * operation is optional and if not implemented it is assumed that + * the display is always the one controlled by the LCD. + * + * RETURNS: + * + * If display_device is NULL or display_device matches the device controlled by + * the LCD, return true. Otherwise return false.
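One possible implementation sketch for the new controls_device hook described above; struct my_lcd and its display_dev member are hypothetical, while lcd_get_data() is the accessor already provided by this header:

	struct my_lcd { struct device *display_dev; };	/* hypothetical */

	static bool my_lcd_controls_device(struct lcd_device *lcd,
					   struct device *display_device)
	{
		struct my_lcd *priv = lcd_get_data(lcd);

		return !display_device || display_device == priv->display_dev;
	}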
+ */ + bool (*controls_device)(struct lcd_device *lcd, struct device *display_device); }; struct lcd_device { diff --git a/include/linux/leds.h b/include/linux/leds.h index e5968c3ed4ae..98f9719c924c 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -171,6 +171,7 @@ struct led_classdev { int new_blink_brightness; void (*flash_resume)(struct led_classdev *led_cdev); + struct workqueue_struct *wq; /* LED workqueue */ struct work_struct set_brightness_work; int delayed_set_value; unsigned long delayed_delay_on; @@ -238,7 +239,7 @@ struct led_classdev { struct kernfs_node *brightness_hw_changed_kn; #endif - /* Ensures consistent access to the LED Flash Class device */ + /* Ensures consistent access to the LED class device */ struct mutex led_access; }; diff --git a/include/linux/libata.h b/include/linux/libata.h index 9b4a6ff03235..c1a85d46eba6 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -270,9 +270,7 @@ enum { /* bits 24:31 of host->flags are reserved for LLD specific flags */ - /* various lengths of time */ - ATA_TMOUT_BOOT = 30000, /* heuristic */ - ATA_TMOUT_BOOT_QUICK = 7000, /* heuristic */ + /* Various lengths of time */ ATA_TMOUT_INTERNAL_QUICK = 5000, ATA_TMOUT_MAX_PARK = 30000, diff --git a/include/linux/list.h b/include/linux/list.h index 5f4b0a39cf46..29a375889fb8 100644 --- a/include/linux/list.h +++ b/include/linux/list.h @@ -687,14 +687,6 @@ static inline void list_splice_tail_init(struct list_head *list, for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next) /** - * list_for_each_reverse - iterate backwards over a list - * @pos: the &struct list_head to use as a loop cursor. - * @head: the head for your list. - */ -#define list_for_each_reverse(pos, head) \ - for (pos = (head)->prev; pos != (head); pos = pos->prev) - -/** * list_for_each_rcu - Iterate over a list in an RCU-safe fashion * @pos: the &struct list_head to use as a loop cursor. * @head: the head for your list. 
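The removed list_for_each_reverse() duplicated the long-standing list_for_each_prev(), which expands to the same loop, so any remaining caller can switch mechanically (struct my_item, my_list and handle() are hypothetical):

	struct list_head *pos;

	list_for_each_prev(pos, &my_list)
		handle(list_entry(pos, struct my_item, node));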
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 5099a8ccd5f4..05c166811f6b 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -32,6 +32,8 @@ struct list_lru_one { struct list_head list; /* may become negative during memcg reparenting */ long nr_items; + /* protects all fields above */ + spinlock_t lock; }; struct list_lru_memcg { @@ -41,11 +43,9 @@ struct list_lru_memcg { }; struct list_lru_node { - /* protects all lists on the node, including per cgroup */ - spinlock_t lock; /* global list, used for the root cgroup in cgroup aware lrus */ struct list_lru_one lru; - long nr_items; + atomic_long_t nr_items; } ____cacheline_aligned_in_smp; struct list_lru { @@ -56,16 +56,28 @@ struct list_lru { bool memcg_aware; struct xarray xa; #endif +#ifdef CONFIG_LOCKDEP + struct lock_class_key *key; +#endif }; void list_lru_destroy(struct list_lru *lru); int __list_lru_init(struct list_lru *lru, bool memcg_aware, - struct lock_class_key *key, struct shrinker *shrinker); + struct shrinker *shrinker); #define list_lru_init(lru) \ - __list_lru_init((lru), false, NULL, NULL) + __list_lru_init((lru), false, NULL) #define list_lru_init_memcg(lru, shrinker) \ - __list_lru_init((lru), true, NULL, shrinker) + __list_lru_init((lru), true, shrinker) + +static inline int list_lru_init_memcg_key(struct list_lru *lru, struct shrinker *shrinker, + struct lock_class_key *key) +{ +#ifdef CONFIG_LOCKDEP + lru->key = key; +#endif + return list_lru_init_memcg(lru, shrinker); +} int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru, gfp_t gfp); @@ -172,7 +184,7 @@ void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item, struct list_head *head); typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item, - struct list_lru_one *list, spinlock_t *lock, void *cb_arg); + struct list_lru_one *list, void *cb_arg); /** * list_lru_walk_one: walk a @lru, isolating and disposing freeable items. 
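With the per-node spinlock replaced by a lock in each list_lru_one, callers that want their own lockdep class now pass a key at init time instead; a sketch using the new helper (all names are hypothetical):

	static struct list_lru my_lru;
	static struct lock_class_key my_lru_lock_key;

	static int my_subsys_init(struct shrinker *my_shrinker)
	{
		return list_lru_init_memcg_key(&my_lru, my_shrinker,
					       &my_lru_lock_key);
	}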
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 61c4b9c41904..c8f0f9458f2c 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -278,9 +278,9 @@ __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, int, struct nlm_cookie *, int); __be32 nlmsvc_unlock(struct net *net, struct nlm_file *, struct nlm_lock *); -__be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *, - struct nlm_host *, struct nlm_lock *, - struct nlm_lock *, struct nlm_cookie *); +__be32 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, + struct nlm_host *host, struct nlm_lock *lock, + struct nlm_lock *conflock); __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *); void nlmsvc_retry_blocked(struct svc_rqst *rqstp); void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *, diff --git a/include/linux/lockd/xdr.h b/include/linux/lockd/xdr.h index 80cca9426761..17d53165d9f2 100644 --- a/include/linux/lockd/xdr.h +++ b/include/linux/lockd/xdr.h @@ -73,8 +73,6 @@ struct nlm_args { u32 fsm_mode; }; -typedef struct nlm_args nlm_args; - /* * Generic lockd result */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 217f7abf2cbf..67964dc4db95 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -173,7 +173,7 @@ static inline void lockdep_init_map(struct lockdep_map *lock, const char *name, (lock)->dep_map.lock_type) #define lockdep_set_subclass(lock, sub) \ - lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\ + lockdep_init_map_type(&(lock)->dep_map, (lock)->dep_map.name, (lock)->dep_map.key, sub,\ (lock)->dep_map.wait_type_inner, \ (lock)->dep_map.wait_type_outer, \ (lock)->dep_map.lock_type) diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h index babf4e3c28ba..8f1a9408302f 100644 --- a/include/linux/logic_pio.h +++ b/include/linux/logic_pio.h @@ -17,7 +17,7 @@ enum { struct logic_pio_hwaddr { struct list_head list; - struct fwnode_handle *fwnode; + const struct fwnode_handle *fwnode; resource_size_t hw_start; resource_size_t io_start; resource_size_t size; /* range size populated */ @@ -110,8 +110,8 @@ void logic_outsl(unsigned long addr, const void *buffer, unsigned int count); #endif /* CONFIG_INDIRECT_PIO */ #define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE) -struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode); -unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode, +struct logic_pio_hwaddr *find_io_range_by_fwnode(const struct fwnode_handle *fwnode); +unsigned long logic_pio_trans_hwaddr(const struct fwnode_handle *fwnode, resource_size_t hw_addr, resource_size_t size); int logic_pio_register_range(struct logic_pio_hwaddr *newrange); void logic_pio_unregister_range(struct logic_pio_hwaddr *range); diff --git a/include/linux/lsm/apparmor.h b/include/linux/lsm/apparmor.h new file mode 100644 index 000000000000..612cbfacb072 --- /dev/null +++ b/include/linux/lsm/apparmor.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Security Module interface to other subsystems. + * AppArmor presents a single pointer to an aa_label structure. + */ +#ifndef __LINUX_LSM_APPARMOR_H +#define __LINUX_LSM_APPARMOR_H + +struct aa_label; + +struct lsm_prop_apparmor { +#ifdef CONFIG_SECURITY_APPARMOR + struct aa_label *label; +#endif +}; + +#endif /* !
__LINUX_LSM_APPARMOR_H */ diff --git a/include/linux/lsm/bpf.h b/include/linux/lsm/bpf.h new file mode 100644 index 000000000000..8106e206fcef --- /dev/null +++ b/include/linux/lsm/bpf.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Security Module interface to other subsystems. + * BPF may present a single u32 value. + */ +#ifndef __LINUX_LSM_BPF_H +#define __LINUX_LSM_BPF_H +#include <linux/types.h> + +struct lsm_prop_bpf { +#ifdef CONFIG_BPF_LSM + u32 secid; +#endif +}; + +#endif /* ! __LINUX_LSM_BPF_H */ diff --git a/include/linux/lsm/selinux.h b/include/linux/lsm/selinux.h new file mode 100644 index 000000000000..9455a6b5b910 --- /dev/null +++ b/include/linux/lsm/selinux.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Security Module interface to other subsystems. + * SELinux presents a single u32 value which is known as a secid. + */ +#ifndef __LINUX_LSM_SELINUX_H +#define __LINUX_LSM_SELINUX_H +#include <linux/types.h> + +struct lsm_prop_selinux { +#ifdef CONFIG_SECURITY_SELINUX + u32 secid; +#endif +}; + +#endif /* ! __LINUX_LSM_SELINUX_H */ diff --git a/include/linux/lsm/smack.h b/include/linux/lsm/smack.h new file mode 100644 index 000000000000..ff730dd7a734 --- /dev/null +++ b/include/linux/lsm/smack.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Linux Security Module interface to other subsystems. + * Smack presents a pointer into the global Smack label list. + */ +#ifndef __LINUX_LSM_SMACK_H +#define __LINUX_LSM_SMACK_H + +struct smack_known; + +struct lsm_prop_smack { +#ifdef CONFIG_SECURITY_SMACK + struct smack_known *skp; +#endif +}; + +#endif /* ! __LINUX_LSM_SMACK_H */ diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 9eca013aa5e1..eb2937599cb0 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -176,7 +176,8 @@ LSM_HOOK(int, -EOPNOTSUPP, inode_setsecurity, struct inode *inode, const char *name, const void *value, size_t size, int flags) LSM_HOOK(int, 0, inode_listsecurity, struct inode *inode, char *buffer, size_t buffer_size) -LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, inode_getlsmprop, struct inode *inode, + struct lsm_prop *prop) LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) LSM_HOOK(int, -EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src, const char *name) @@ -217,6 +218,8 @@ LSM_HOOK(int, 0, cred_prepare, struct cred *new, const struct cred *old, LSM_HOOK(void, LSM_RET_VOID, cred_transfer, struct cred *new, const struct cred *old) LSM_HOOK(void, LSM_RET_VOID, cred_getsecid, const struct cred *c, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, cred_getlsmprop, const struct cred *c, + struct lsm_prop *prop) LSM_HOOK(int, 0, kernel_act_as, struct cred *new, u32 secid) LSM_HOOK(int, 0, kernel_create_files_as, struct cred *new, struct inode *inode) LSM_HOOK(int, 0, kernel_module_request, char *kmod_name) @@ -235,9 +238,9 @@ LSM_HOOK(int, 0, task_fix_setgroups, struct cred *new, const struct cred * old) LSM_HOOK(int, 0, task_setpgid, struct task_struct *p, pid_t pgid) LSM_HOOK(int, 0, task_getpgid, struct task_struct *p) LSM_HOOK(int, 0, task_getsid, struct task_struct *p) -LSM_HOOK(void, LSM_RET_VOID, current_getsecid_subj, u32 *secid) -LSM_HOOK(void, LSM_RET_VOID, task_getsecid_obj, - struct task_struct *p, u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, current_getlsmprop_subj, struct lsm_prop *prop) +LSM_HOOK(void, LSM_RET_VOID, task_getlsmprop_obj, + struct 
task_struct *p, struct lsm_prop *prop) LSM_HOOK(int, 0, task_setnice, struct task_struct *p, int nice) LSM_HOOK(int, 0, task_setioprio, struct task_struct *p, int ioprio) LSM_HOOK(int, 0, task_getioprio, struct task_struct *p) @@ -256,8 +259,8 @@ LSM_HOOK(void, LSM_RET_VOID, task_to_inode, struct task_struct *p, struct inode *inode) LSM_HOOK(int, 0, userns_create, const struct cred *cred) LSM_HOOK(int, 0, ipc_permission, struct kern_ipc_perm *ipcp, short flag) -LSM_HOOK(void, LSM_RET_VOID, ipc_getsecid, struct kern_ipc_perm *ipcp, - u32 *secid) +LSM_HOOK(void, LSM_RET_VOID, ipc_getlsmprop, struct kern_ipc_perm *ipcp, + struct lsm_prop *prop) LSM_HOOK(int, 0, msg_msg_alloc_security, struct msg_msg *msg) LSM_HOOK(void, LSM_RET_VOID, msg_msg_free_security, struct msg_msg *msg) LSM_HOOK(int, 0, msg_queue_alloc_security, struct kern_ipc_perm *perm) @@ -294,6 +297,8 @@ LSM_HOOK(int, -EINVAL, setprocattr, const char *name, void *value, size_t size) LSM_HOOK(int, 0, ismaclabel, const char *name) LSM_HOOK(int, -EOPNOTSUPP, secid_to_secctx, u32 secid, char **secdata, u32 *seclen) +LSM_HOOK(int, -EOPNOTSUPP, lsmprop_to_secctx, struct lsm_prop *prop, + char **secdata, u32 *seclen) LSM_HOOK(int, 0, secctx_to_secid, const char *secdata, u32 seclen, u32 *secid) LSM_HOOK(void, LSM_RET_VOID, release_secctx, char *secdata, u32 seclen) LSM_HOOK(void, LSM_RET_VOID, inode_invalidate_secctx, struct inode *inode) @@ -416,7 +421,8 @@ LSM_HOOK(void, LSM_RET_VOID, key_post_create_or_update, struct key *keyring, LSM_HOOK(int, 0, audit_rule_init, u32 field, u32 op, char *rulestr, void **lsmrule, gfp_t gfp) LSM_HOOK(int, 0, audit_rule_known, struct audit_krule *krule) -LSM_HOOK(int, 0, audit_rule_match, u32 secid, u32 field, u32 op, void *lsmrule) +LSM_HOOK(int, 0, audit_rule_match, struct lsm_prop *prop, u32 field, u32 op, + void *lsmrule) LSM_HOOK(void, LSM_RET_VOID, audit_rule_free, void *lsmrule) #endif /* CONFIG_AUDIT */ diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index c2c11004085e..cbbcd18d4186 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -224,7 +224,7 @@ typedef struct { /* nothing */ } lockdep_map_p; * (set at tree creation time) and dynamic information set under the spinlock. * * Another use of flags is to indicate global states of the tree. This is the - * case with the MAPLE_USE_RCU flag, which indicates the tree is currently in + * case with the MT_FLAGS_USE_RCU flag, which indicates the tree is currently in * RCU mode. This mode was added to allow the tree to reuse nodes instead of * re-allocating and RCU freeing nodes when there is a single user. */ @@ -592,6 +592,20 @@ static __always_inline void mas_reset(struct ma_state *mas) #define mas_for_each(__mas, __entry, __max) \ while (((__entry) = mas_find((__mas), (__max))) != NULL) +/** + * mas_for_each_rev() - Iterate over a range of the maple tree in reverse order. + * @__mas: Maple Tree operation state (maple_state) + * @__entry: Entry retrieved from the tree + * @__min: minimum index to retrieve from the tree + * + * When returned, mas->index and mas->last will hold the entire range for the + * entry. + * + * Note: may return the zero entry.
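A usage sketch for the reverse iterator documented here, assuming an initialized, populated maple tree mt; starting the state at ULONG_MAX walks entries downward to index 0 under RCU:

	MA_STATE(mas, &mt, ULONG_MAX, ULONG_MAX);
	void *entry;

	rcu_read_lock();
	mas_for_each_rev(&mas, entry, 0)	/* walk down to index 0 */
		pr_info("[%lu, %lu] -> %p\n", mas.index, mas.last, entry);
	rcu_read_unlock();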
+ */ +#define mas_for_each_rev(__mas, __entry, __min) \ + while (((__entry) = mas_find_rev((__mas), (__min))) != NULL) + #ifdef CONFIG_DEBUG_MAPLE_TREE enum mt_dump_format { mt_dump_dec, diff --git a/include/linux/mdio.h b/include/linux/mdio.h index efeca5bd7600..3c3deac57894 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h @@ -165,31 +165,12 @@ extern int mdio_set_flag(const struct mdio_if_info *mdio, bool sense); extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds); extern int mdio45_nway_restart(const struct mdio_if_info *mdio); -extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio, - struct ethtool_cmd *ecmd, - u32 npage_adv, u32 npage_lpa); extern void mdio45_ethtool_ksettings_get_npage(const struct mdio_if_info *mdio, struct ethtool_link_ksettings *cmd, u32 npage_adv, u32 npage_lpa); /** - * mdio45_ethtool_gset - get settings for ETHTOOL_GSET - * @mdio: MDIO interface - * @ecmd: Ethtool request structure - * - * Since the CSRs for auto-negotiation using next pages are not fully - * standardised, this function does not attempt to decode them. Use - * mdio45_ethtool_gset_npage() to specify advertisement bits from next - * pages. - */ -static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio, - struct ethtool_cmd *ecmd) -{ - mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0); -} - -/** * mdio45_ethtool_ksettings_get - get settings for ETHTOOL_GLINKSETTINGS * @mdio: MDIO interface * @cmd: Ethtool request structure diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e1b41554a5fb..5502aa8e138e 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -299,25 +299,10 @@ struct mem_cgroup { /* For oom notifier event fd */ struct list_head oom_notify; - /* - * Should we move charges of a task when a task is moved into this - * mem_cgroup ? And what type of charges should we move ? - */ - unsigned long move_charge_at_immigrate; - /* taken only while moving_account > 0 */ - spinlock_t move_lock; - unsigned long move_lock_flags; - /* Legacy tcp memory accounting */ bool tcpmem_active; int tcpmem_pressure; - /* - * set > 0 if pages under this cgroup are moving to other cgroup. - */ - atomic_t moving_account; - struct task_struct *move_lock_task; - /* List of events which userspace want to receive */ struct list_head event_list; spinlock_t event_list_lock; @@ -433,9 +418,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio) * * - the folio lock * - LRU isolation - * - folio_memcg_lock() * - exclusive reference - * - mem_cgroup_trylock_pages() * * For a kmem folio a caller should hold an rcu read lock to protect memcg * associated with a kmem folio from being released. @@ -460,35 +443,6 @@ static inline bool folio_memcg_charged(struct folio *folio) return __folio_memcg(folio) != NULL; } -/** - * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio. - * @folio: Pointer to the folio. - * - * This function assumes that the folio is known to have a - * proper memory cgroup pointer. It's not safe to call this function - * against some type of folios, e.g. slab folios or ex-slab folios. - * - * Return: A pointer to the memory cgroup associated with the folio, - * or NULL. 
- */ -static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) -{ - unsigned long memcg_data = READ_ONCE(folio->memcg_data); - - VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); - - if (memcg_data & MEMCG_DATA_KMEM) { - struct obj_cgroup *objcg; - - objcg = (void *)(memcg_data & ~OBJEXTS_FLAGS_MASK); - return obj_cgroup_memcg(objcg); - } - - WARN_ON_ONCE(!rcu_read_lock_held()); - - return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); -} - /* * folio_memcg_check - Get the memory cgroup associated with a folio. * @folio: Pointer to the folio. @@ -504,9 +458,7 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) * * - the folio lock * - LRU isolation - * - lock_folio_memcg() * - exclusive reference - * - mem_cgroup_trylock_pages() * * For a kmem folio a caller should hold an rcu read lock to protect memcg * associated with a kmem folio from being released. @@ -1103,10 +1055,9 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio) return NULL; } -static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) +static inline bool folio_memcg_charged(struct folio *folio) { - WARN_ON_ONCE(!rcu_read_lock_held()); - return NULL; + return false; } static inline struct mem_cgroup *folio_memcg_check(struct folio *folio) @@ -1282,6 +1233,10 @@ struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) return NULL; } +static inline void obj_cgroup_get(struct obj_cgroup *objcg) +{ +} + static inline void obj_cgroup_put(struct obj_cgroup *objcg) { } @@ -1874,26 +1829,6 @@ static inline bool task_in_memcg_oom(struct task_struct *p) return p->memcg_in_oom; } -void folio_memcg_lock(struct folio *folio); -void folio_memcg_unlock(struct folio *folio); - -/* try to stablize folio_memcg() for all the pages in a memcg */ -static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) -{ - rcu_read_lock(); - - if (mem_cgroup_disabled() || !atomic_read(&memcg->moving_account)) - return true; - - rcu_read_unlock(); - return false; -} - -static inline void mem_cgroup_unlock_pages(void) -{ - rcu_read_unlock(); -} - static inline void mem_cgroup_enter_user_fault(void) { WARN_ON(current->in_user_fault); @@ -1915,26 +1850,6 @@ unsigned long memcg1_soft_limit_reclaim(pg_data_t *pgdat, int order, return 0; } -static inline void folio_memcg_lock(struct folio *folio) -{ -} - -static inline void folio_memcg_unlock(struct folio *folio) -{ -} - -static inline bool mem_cgroup_trylock_pages(struct mem_cgroup *memcg) -{ - /* to match folio_memcg_rcu() */ - rcu_read_lock(); - return true; -} - -static inline void mem_cgroup_unlock_pages(void) -{ - rcu_read_unlock(); -} - static inline bool task_in_memcg_oom(struct task_struct *p) { return false; diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 1add16f21612..ce9885e0178a 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -47,7 +47,7 @@ struct mempolicy { atomic_t refcnt; unsigned short mode; /* See MPOL_* above */ unsigned short flags; /* See set_mempolicy() MPOL_F_* above */ - nodemask_t nodes; /* interleave/bind/perfer */ + nodemask_t nodes; /* interleave/bind/preferred/etc */ int home_node; /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */ union { diff --git a/include/linux/memstick.h b/include/linux/memstick.h index ebf73d4ee969..107bdcbedf79 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -293,7 +293,7 @@ struct memstick_host { }; struct memstick_driver { - struct memstick_device_id *id_table; + const struct 
memstick_device_id *id_table; int (*probe)(struct memstick_dev *card); void (*remove)(struct memstick_dev *card); int (*suspend)(struct memstick_dev *card, diff --git a/include/linux/mfd/88pm886.h b/include/linux/mfd/88pm886.h index 133aa302e492..85eca44f39ab 100644 --- a/include/linux/mfd/88pm886.h +++ b/include/linux/mfd/88pm886.h @@ -31,6 +31,15 @@ #define PM886_INT_WC BIT(1) #define PM886_INT_MASK_MODE BIT(2) +#define PM886_REG_RTC_CNT1 0xd1 +#define PM886_REG_RTC_CNT2 0xd2 +#define PM886_REG_RTC_CNT3 0xd3 +#define PM886_REG_RTC_CNT4 0xd4 +#define PM886_REG_RTC_SPARE1 0xea +#define PM886_REG_RTC_SPARE2 0xeb +#define PM886_REG_RTC_SPARE3 0xec +#define PM886_REG_RTC_SPARE4 0xed +#define PM886_REG_RTC_SPARE5 0xee #define PM886_REG_RTC_SPARE6 0xef #define PM886_REG_BUCK_EN 0x08 diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index f4dfc1871a95..c3df0e615fbf 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -19,6 +19,7 @@ enum axp20x_variants { AXP223_ID, AXP288_ID, AXP313A_ID, + AXP323_ID, AXP717_ID, AXP803_ID, AXP806_ID, @@ -113,6 +114,7 @@ enum axp20x_variants { #define AXP313A_SHUTDOWN_CTRL 0x1a #define AXP313A_IRQ_EN 0x20 #define AXP313A_IRQ_STATE 0x21 +#define AXP323_DCDC_MODE_CTRL2 0x22 #define AXP717_ON_INDICATE 0x00 #define AXP717_PMU_STATUS_2 0x01 @@ -959,7 +961,7 @@ struct axp20x_dev { unsigned long irq_flags; struct regmap *regmap; struct regmap_irq_chip_data *regmap_irqc; - long variant; + enum axp20x_variants variant; int nr_cells; const struct mfd_cell *cells; const struct regmap_config *regmap_cfg; diff --git a/include/linux/mfd/cgbc.h b/include/linux/mfd/cgbc.h new file mode 100644 index 000000000000..badbec4c7033 --- /dev/null +++ b/include/linux/mfd/cgbc.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Congatec Board Controller driver definitions + * + * Copyright (C) 2024 Bootlin + * Author: Thomas Richard <thomas.richard@bootlin.com> + */ + +#ifndef _LINUX_MFD_CGBC_H_ +#define _LINUX_MFD_CGBC_H_ + +/** + * struct cgbc_version - Board Controller device version structure + * @feature: Board Controller feature number + * @major: Board Controller major revision + * @minor: Board Controller minor revision + */ +struct cgbc_version { + unsigned char feature; + unsigned char major; + unsigned char minor; +}; + +/** + * struct cgbc_device_data - Internal representation of the Board Controller device + * @io_session: Pointer to the session IO memory + * @io_cmd: Pointer to the command IO memory + * @session: Session id returned by the Board Controller + * @dev: Pointer to kernel device structure + * @version: Board Controller version structure + * @lock: Board Controller mutex + */ +struct cgbc_device_data { + void __iomem *io_session; + void __iomem *io_cmd; + u8 session; + struct device *dev; + struct cgbc_version version; + struct mutex lock; +}; + +int cgbc_command(struct cgbc_device_data *cgbc, void *cmd, unsigned int cmd_size, + void *data, unsigned int data_size, u8 *status); + +#endif /*_LINUX_MFD_CGBC_H_*/ diff --git a/include/linux/mfd/max5970.h b/include/linux/mfd/max5970.h index 762a7d40c843..fc50e89edfaa 100644 --- a/include/linux/mfd/max5970.h +++ b/include/linux/mfd/max5970.h @@ -16,18 +16,6 @@ #define MAX5978_NUM_SWITCHES 1 #define MAX5970_NUM_LEDS 4 -struct max5970_data { - int num_switches; - u32 irng[MAX5970_NUM_SWITCHES]; - u32 mon_rng[MAX5970_NUM_SWITCHES]; - u32 shunt_micro_ohms[MAX5970_NUM_SWITCHES]; -}; - -enum max5970_chip_type { - TYPE_MAX5978 = 1, - TYPE_MAX5970, -}; - #define
MAX5970_REG_CURRENT_L(ch) (0x01 + (ch) * 4) #define MAX5970_REG_CURRENT_H(ch) (0x00 + (ch) * 4) #define MAX5970_REG_VOLTAGE_L(ch) (0x03 + (ch) * 4) diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 20c5e02ed9da..c324d548619e 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -419,17 +419,6 @@ enum max77693_haptic_reg { #define MAX77693_CONFIG2_MEN 6 #define MAX77693_CONFIG2_HTYP 5 -enum max77693_irq_source { - LED_INT = 0, - TOPSYS_INT, - CHG_INT, - MUIC_INT1, - MUIC_INT2, - MUIC_INT3, - - MAX77693_IRQ_GROUP_NR, -}; - #define SRC_IRQ_CHARGER BIT(0) #define SRC_IRQ_TOP BIT(1) #define SRC_IRQ_FLASH BIT(2) diff --git a/include/linux/mfd/mt6328/core.h b/include/linux/mfd/mt6328/core.h new file mode 100644 index 000000000000..9a08aed72b9f --- /dev/null +++ b/include/linux/mfd/mt6328/core.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2015 MediaTek Inc. + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#ifndef __MFD_MT6328_CORE_H__ +#define __MFD_MT6328_CORE_H__ + +enum mt6328_irq_status_numbers { + MT6328_IRQ_STATUS_PWRKEY = 0, + MT6328_IRQ_STATUS_HOMEKEY, + MT6328_IRQ_STATUS_PWRKEY_R, + MT6328_IRQ_STATUS_HOMEKEY_R, + MT6328_IRQ_STATUS_THR_H, + MT6328_IRQ_STATUS_THR_L, + MT6328_IRQ_STATUS_BAT_H, + MT6328_IRQ_STATUS_BAT_L, + MT6328_IRQ_STATUS_RTC, + MT6328_IRQ_STATUS_AUDIO, + MT6328_IRQ_STATUS_ACCDET, + MT6328_IRQ_STATUS_ACCDET_EINT, + MT6328_IRQ_STATUS_ACCDET_NEGV, + MT6328_IRQ_STATUS_NI_LBAT_INT, + MT6328_IRQ_STATUS_VPROC_OC = 16, + MT6328_IRQ_STATUS_VSYS_OC, + MT6328_IRQ_STATUS_VLTE_OC, + MT6328_IRQ_STATUS_VCORE_OC, + MT6328_IRQ_STATUS_VPA_OC, + MT6328_IRQ_STATUS_LDO_OC, + MT6328_IRQ_STATUS_BAT2_H, + MT6328_IRQ_STATUS_BAT2_L, + MT6328_IRQ_STATUS_VISMPS0_H, + MT6328_IRQ_STATUS_VISMPS0_L, + MT6328_IRQ_STATUS_AUXADC_IMP, + MT6328_IRQ_STATUS_OV = 32, + MT6328_IRQ_STATUS_BVALID_DET, + MT6328_IRQ_STATUS_VBATON_HV, + MT6328_IRQ_STATUS_VBATON_UNDET, + MT6328_IRQ_STATUS_WATCHDOG, + MT6328_IRQ_STATUS_PCHR_CM_VDEC, + MT6328_IRQ_STATUS_CHRDET, + MT6328_IRQ_STATUS_PCHR_CM_VINC, + MT6328_IRQ_STATUS_FG_BAT_H, + MT6328_IRQ_STATUS_FG_BAT_L, + MT6328_IRQ_STATUS_FG_CUR_H, + MT6328_IRQ_STATUS_FG_CUR_L, + MT6328_IRQ_STATUS_FG_ZCV, + MT6328_IRQ_STATUS_SPKL_D, + MT6328_IRQ_STATUS_SPKL_AB, +}; + +#endif /* __MFD_MT6328_CORE_H__ */ diff --git a/include/linux/mfd/mt6328/registers.h b/include/linux/mfd/mt6328/registers.h new file mode 100644 index 000000000000..8199aaea27b9 --- /dev/null +++ b/include/linux/mfd/mt6328/registers.h @@ -0,0 +1,822 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Yassine Oudjana <y.oudjana@protonmail.com> + */ + +#ifndef __MFD_MT6328_REGISTERS_H__ +#define __MFD_MT6328_REGISTERS_H__ + +/* PMIC Registers */ +#define MT6328_STRUP_CON0 0x0000 +#define MT6328_STRUP_CON2 0x0002 +#define MT6328_STRUP_CON3 0x0004 +#define MT6328_STRUP_CON4 0x0006 +#define MT6328_STRUP_CON5 0x0008 +#define MT6328_STRUP_CON6 0x000a +#define MT6328_STRUP_CON7 0x000c +#define MT6328_STRUP_CON8 0x000e +#define MT6328_STRUP_CON9 0x0010 +#define MT6328_STRUP_CON10 0x0012 +#define MT6328_STRUP_CON11 0x0014 +#define MT6328_STRUP_CON12 0x0016 +#define MT6328_STRUP_CON13 0x0018 +#define MT6328_STRUP_CON14 0x001a +#define MT6328_STRUP_CON15 0x001c +#define MT6328_STRUP_CON16 0x001e +#define MT6328_STRUP_CON17 0x0020 +#define MT6328_STRUP_CON18 0x0022 +#define MT6328_STRUP_CON19 0x0024 +#define MT6328_STRUP_CON20 0x0026 +#define MT6328_STRUP_CON21
0x0028 +#define MT6328_STRUP_CON22 0x002a +#define MT6328_STRUP_CON23 0x002c +#define MT6328_STRUP_CON24 0x002e +#define MT6328_STRUP_CON25 0x0030 +#define MT6328_STRUP_CON26 0x0032 +#define MT6328_STRUP_CON27 0x0034 +#define MT6328_STRUP_CON28 0x0036 +#define MT6328_STRUP_CON29 0x0038 +#define MT6328_STRUP_CON30 0x003a +#define MT6328_STRUP_CON31 0x003c +#define MT6328_STRUP_CON32 0x003e +#define MT6328_STRUP_ANA_CON0 0x0040 +#define MT6328_HWCID 0x0200 +#define MT6328_SWCID 0x0202 +#define MT6328_TOP_CON 0x0204 +#define MT6328_TEST_OUT 0x0206 +#define MT6328_TEST_CON0 0x0208 +#define MT6328_TEST_CON1 0x020a +#define MT6328_TESTMODE_SW 0x020c +#define MT6328_EN_STATUS0 0x020e +#define MT6328_EN_STATUS1 0x0210 +#define MT6328_EN_STATUS2 0x0212 +#define MT6328_OCSTATUS0 0x0214 +#define MT6328_OCSTATUS1 0x0216 +#define MT6328_OCSTATUS2 0x0218 +#define MT6328_PGDEBSTATUS 0x021a +#define MT6328_PGSTATUS 0x021c +#define MT6328_THERMALSTATUS 0x021e +#define MT6328_TOPSTATUS 0x0220 +#define MT6328_TDSEL_CON 0x0222 +#define MT6328_RDSEL_CON 0x0224 +#define MT6328_SMT_CON0 0x0226 +#define MT6328_SMT_CON1 0x0228 +#define MT6328_SMT_CON2 0x022a +#define MT6328_DRV_CON0 0x022c +#define MT6328_DRV_CON1 0x022e +#define MT6328_DRV_CON2 0x0230 +#define MT6328_DRV_CON3 0x0232 +#define MT6328_TOP_STATUS 0x0234 +#define MT6328_TOP_STATUS_SET 0x0236 +#define MT6328_TOP_STATUS_CLR 0x0238 +#define MT6328_RGS_ANA_MON 0x023a +#define MT6328_TOP_CKPDN_CON0 0x023c +#define MT6328_TOP_CKPDN_CON0_SET 0x023e +#define MT6328_TOP_CKPDN_CON0_CLR 0x0240 +#define MT6328_TOP_CKPDN_CON1 0x0242 +#define MT6328_TOP_CKPDN_CON1_SET 0x0244 +#define MT6328_TOP_CKPDN_CON1_CLR 0x0246 +#define MT6328_TOP_CKPDN_CON2 0x0248 +#define MT6328_TOP_CKPDN_CON2_SET 0x024a +#define MT6328_TOP_CKPDN_CON2_CLR 0x024c +#define MT6328_TOP_CKPDN_CON3 0x024e +#define MT6328_TOP_CKPDN_CON3_SET 0x0250 +#define MT6328_TOP_CKPDN_CON3_CLR 0x0252 +#define MT6328_TOP_CKPDN_CON4 0x0254 +#define MT6328_TOP_CKPDN_CON4_SET 0x0256 +#define MT6328_TOP_CKPDN_CON4_CLR 0x0258 +#define MT6328_TOP_CKSEL_CON0 0x025a +#define MT6328_TOP_CKSEL_CON0_SET 0x025c +#define MT6328_TOP_CKSEL_CON0_CLR 0x025e +#define MT6328_TOP_CKSEL_CON1 0x0260 +#define MT6328_TOP_CKSEL_CON1_SET 0x0262 +#define MT6328_TOP_CKSEL_CON1_CLR 0x0264 +#define MT6328_TOP_CKSEL_CON2 0x0266 +#define MT6328_TOP_CKSEL_CON2_SET 0x0268 +#define MT6328_TOP_CKSEL_CON2_CLR 0x026a +#define MT6328_TOP_CKDIVSEL_CON0 0x026c +#define MT6328_TOP_CKDIVSEL_CON0_SET 0x026e +#define MT6328_TOP_CKDIVSEL_CON0_CLR 0x0270 +#define MT6328_TOP_CKDIVSEL_CON1 0x0272 +#define MT6328_TOP_CKDIVSEL_CON1_SET 0x0274 +#define MT6328_TOP_CKDIVSEL_CON1_CLR 0x0276 +#define MT6328_TOP_CKHWEN_CON0 0x0278 +#define MT6328_TOP_CKHWEN_CON0_SET 0x027a +#define MT6328_TOP_CKHWEN_CON0_CLR 0x027c +#define MT6328_TOP_CKHWEN_CON1 0x027e +#define MT6328_TOP_CKHWEN_CON1_SET 0x0280 +#define MT6328_TOP_CKHWEN_CON1_CLR 0x0282 +#define MT6328_TOP_CKTST_CON0 0x0284 +#define MT6328_TOP_CKTST_CON1 0x0286 +#define MT6328_TOP_CKTST_CON2 0x0288 +#define MT6328_TOP_CLKSQ 0x028a +#define MT6328_TOP_CLKSQ_SET 0x028c +#define MT6328_TOP_CLKSQ_CLR 0x028e +#define MT6328_TOP_CLKSQ_RTC 0x0290 +#define MT6328_TOP_CLKSQ_RTC_SET 0x0292 +#define MT6328_TOP_CLKSQ_RTC_CLR 0x0294 +#define MT6328_TOP_CLK_TRIM 0x0296 +#define MT6328_TOP_RST_CON0 0x0298 +#define MT6328_TOP_RST_CON0_SET 0x029a +#define MT6328_TOP_RST_CON0_CLR 0x029c +#define MT6328_TOP_RST_CON1 0x029e +#define MT6328_TOP_RST_MISC 0x02a0 +#define MT6328_TOP_RST_MISC_SET 0x02a2 +#define MT6328_TOP_RST_MISC_CLR 
0x02a4 +#define MT6328_TOP_RST_STATUS 0x02a6 +#define MT6328_TOP_RST_STATUS_SET 0x02a8 +#define MT6328_TOP_RST_STATUS_CLR 0x02aa +#define MT6328_INT_CON0 0x02ac +#define MT6328_INT_CON0_SET 0x02ae +#define MT6328_INT_CON0_CLR 0x02b0 +#define MT6328_INT_CON1 0x02b2 +#define MT6328_INT_CON1_SET 0x02b4 +#define MT6328_INT_CON1_CLR 0x02b6 +#define MT6328_INT_CON2 0x02b8 +#define MT6328_INT_CON2_SET 0x02ba +#define MT6328_INT_CON2_CLR 0x02bc +#define MT6328_INT_MISC_CON 0x02be +#define MT6328_INT_MISC_CON_SET 0x02c0 +#define MT6328_INT_MISC_CON_CLR 0x02c2 +#define MT6328_INT_STATUS0 0x02c4 +#define MT6328_INT_STATUS1 0x02c6 +#define MT6328_INT_STATUS2 0x02c8 +#define MT6328_OC_GEAR_0 0x02ca +#define MT6328_FQMTR_CON0 0x02cc +#define MT6328_FQMTR_CON1 0x02ce +#define MT6328_FQMTR_CON2 0x02d0 +#define MT6328_RG_SPI_CON 0x02d2 +#define MT6328_DEW_DIO_EN 0x02d4 +#define MT6328_DEW_READ_TEST 0x02d6 +#define MT6328_DEW_WRITE_TEST 0x02d8 +#define MT6328_DEW_CRC_SWRST 0x02da +#define MT6328_DEW_CRC_EN 0x02dc +#define MT6328_DEW_CRC_VAL 0x02de +#define MT6328_DEW_DBG_MON_SEL 0x02e0 +#define MT6328_DEW_CIPHER_KEY_SEL 0x02e2 +#define MT6328_DEW_CIPHER_IV_SEL 0x02e4 +#define MT6328_DEW_CIPHER_EN 0x02e6 +#define MT6328_DEW_CIPHER_RDY 0x02e8 +#define MT6328_DEW_CIPHER_MODE 0x02ea +#define MT6328_DEW_CIPHER_SWRST 0x02ec +#define MT6328_DEW_RDDMY_NO 0x02ee +#define MT6328_INT_TYPE_CON0 0x02f0 +#define MT6328_INT_TYPE_CON0_SET 0x02f2 +#define MT6328_INT_TYPE_CON0_CLR 0x02f4 +#define MT6328_INT_TYPE_CON1 0x02f6 +#define MT6328_INT_TYPE_CON1_SET 0x02f8 +#define MT6328_INT_TYPE_CON1_CLR 0x02fa +#define MT6328_INT_TYPE_CON2 0x02fc +#define MT6328_INT_TYPE_CON2_SET 0x02fe +#define MT6328_INT_TYPE_CON2_CLR 0x0300 +#define MT6328_INT_STA 0x0302 +#define MT6328_BUCK_ALL_CON0 0x0400 +#define MT6328_BUCK_ALL_CON1 0x0402 +#define MT6328_BUCK_ALL_CON2 0x0404 +#define MT6328_BUCK_ALL_CON3 0x0406 +#define MT6328_BUCK_ALL_CON4 0x0408 +#define MT6328_BUCK_ALL_CON5 0x040a +#define MT6328_BUCK_ALL_CON6 0x040c +#define MT6328_BUCK_ALL_CON9 0x040e +#define MT6328_BUCK_ALL_CON12 0x0410 +#define MT6328_BUCK_ALL_CON13 0x0412 +#define MT6328_BUCK_ALL_CON14 0x0414 +#define MT6328_BUCK_ALL_CON16 0x0416 +#define MT6328_BUCK_ALL_CON18 0x0418 +#define MT6328_BUCK_ALL_CON19 0x041a +#define MT6328_BUCK_ALL_CON20 0x041c +#define MT6328_BUCK_ALL_CON21 0x041e +#define MT6328_BUCK_ALL_CON22 0x0420 +#define MT6328_BUCK_ALL_CON23 0x0422 +#define MT6328_BUCK_ALL_CON24 0x0424 +#define MT6328_BUCK_ALL_CON25 0x0426 +#define MT6328_BUCK_ALL_CON26 0x0428 +#define MT6328_BUCK_ALL_CON27 0x042a +#define MT6328_BUCK_ALL_CON28 0x042c +#define MT6328_SMPS_TOP_ANA_CON0 0x042e +#define MT6328_SMPS_TOP_ANA_CON1 0x0430 +#define MT6328_SMPS_TOP_ANA_CON2 0x0432 +#define MT6328_SMPS_TOP_ANA_CON3 0x0434 +#define MT6328_SMPS_TOP_ANA_CON4 0x0436 +#define MT6328_SMPS_TOP_ANA_CON5 0x0438 +#define MT6328_SMPS_TOP_ANA_CON6 0x043a +#define MT6328_SMPS_TOP_ANA_CON7 0x043c +#define MT6328_SMPS_TOP_ANA_CON8 0x043e +#define MT6328_VCORE_ANA_CON0 0x0440 +#define MT6328_VCORE_ANA_CON1 0x0442 +#define MT6328_VCORE_ANA_CON2 0x0444 +#define MT6328_VCORE_ANA_CON3 0x0446 +#define MT6328_VCORE_ANA_CON4 0x0448 +#define MT6328_VSYS22_ANA_CON0 0x044a +#define MT6328_VSYS22_ANA_CON1 0x044c +#define MT6328_VSYS22_ANA_CON2 0x044e +#define MT6328_VSYS22_ANA_CON3 0x0450 +#define MT6328_VSYS22_ANA_CON4 0x0452 +#define MT6328_VPROC_ANA_CON0 0x0454 +#define MT6328_VPROC_ANA_CON1 0x0456 +#define MT6328_VPROC_ANA_CON2 0x0458 +#define MT6328_VPROC_ANA_CON3 0x045a +#define MT6328_VPROC_ANA_CON4 
0x045c +#define MT6328_OSC32_ANA_CON0 0x045e +#define MT6328_OSC32_ANA_CON1 0x0460 +#define MT6328_VPA_ANA_CON0 0x0462 +#define MT6328_VPA_ANA_CON1 0x0464 +#define MT6328_VPA_ANA_CON2 0x0466 +#define MT6328_VPA_ANA_CON3 0x0468 +#define MT6328_VLTE_ANA_CON0 0x046a +#define MT6328_VLTE_ANA_CON1 0x046c +#define MT6328_VLTE_ANA_CON2 0x046e +#define MT6328_VLTE_ANA_CON3 0x0470 +#define MT6328_VLTE_ANA_CON4 0x0472 +#define MT6328_VPROC_CON0 0x0474 +#define MT6328_VPROC_CON1 0x0476 +#define MT6328_VPROC_CON2 0x0478 +#define MT6328_VPROC_CON3 0x047a +#define MT6328_VPROC_CON4 0x047c +#define MT6328_VPROC_CON5 0x047e +#define MT6328_VPROC_CON6 0x0480 +#define MT6328_VPROC_CON7 0x0482 +#define MT6328_VPROC_CON8 0x0484 +#define MT6328_VPROC_CON9 0x0486 +#define MT6328_VPROC_CON10 0x0488 +#define MT6328_VPROC_CON11 0x048a +#define MT6328_VPROC_CON12 0x048c +#define MT6328_VPROC_CON13 0x048e +#define MT6328_VPROC_CON14 0x0490 +#define MT6328_VPROC_CON15 0x0492 +#define MT6328_VPROC_CON16 0x0494 +#define MT6328_VPROC_CON17 0x0496 +#define MT6328_VPROC_CON18 0x0498 +#define MT6328_VPROC_CON19 0x049a +#define MT6328_VSRAM_CON0 0x049c +#define MT6328_VSRAM_CON1 0x049e +#define MT6328_VSRAM_CON2 0x04a0 +#define MT6328_VSRAM_CON3 0x04a2 +#define MT6328_VSRAM_CON4 0x04a4 +#define MT6328_VSRAM_CON5 0x04a6 +#define MT6328_VSRAM_CON6 0x04a8 +#define MT6328_VSRAM_CON7 0x04aa +#define MT6328_VSRAM_CON8 0x04ac +#define MT6328_VSRAM_CON9 0x04ae +#define MT6328_VSRAM_CON10 0x04b0 +#define MT6328_VSRAM_CON11 0x04b2 +#define MT6328_VSRAM_CON12 0x04b4 +#define MT6328_VSRAM_CON13 0x04b6 +#define MT6328_VSRAM_CON14 0x04b8 +#define MT6328_VSRAM_CON15 0x04ba +#define MT6328_VSRAM_CON16 0x04bc +#define MT6328_VSRAM_CON17 0x04be +#define MT6328_VSRAM_CON18 0x04c0 +#define MT6328_VSRAM_CON19 0x04c2 +#define MT6328_VLTE_CON0 0x04c4 +#define MT6328_VLTE_CON1 0x04c6 +#define MT6328_VLTE_CON2 0x04c8 +#define MT6328_VLTE_CON3 0x04ca +#define MT6328_VLTE_CON4 0x04cc +#define MT6328_VLTE_CON5 0x04ce +#define MT6328_VLTE_CON6 0x04d0 +#define MT6328_VLTE_CON7 0x04d2 +#define MT6328_VLTE_CON8 0x04d4 +#define MT6328_VLTE_CON9 0x04d6 +#define MT6328_VLTE_CON10 0x04d8 +#define MT6328_VLTE_CON11 0x04da +#define MT6328_VLTE_CON12 0x04dc +#define MT6328_VLTE_CON13 0x04de +#define MT6328_VLTE_CON14 0x04e0 +#define MT6328_VLTE_CON15 0x04e2 +#define MT6328_VLTE_CON16 0x04e4 +#define MT6328_VLTE_CON17 0x04e6 +#define MT6328_VLTE_CON18 0x04e8 +#define MT6328_VLTE_CON19 0x04ea +#define MT6328_VCORE1_CON0 0x0600 +#define MT6328_VCORE1_CON1 0x0602 +#define MT6328_VCORE1_CON2 0x0604 +#define MT6328_VCORE1_CON3 0x0606 +#define MT6328_VCORE1_CON4 0x0608 +#define MT6328_VCORE1_CON5 0x060a +#define MT6328_VCORE1_CON6 0x060c +#define MT6328_VCORE1_CON7 0x060e +#define MT6328_VCORE1_CON8 0x0610 +#define MT6328_VCORE1_CON9 0x0612 +#define MT6328_VCORE1_CON10 0x0614 +#define MT6328_VCORE1_CON11 0x0616 +#define MT6328_VCORE1_CON12 0x0618 +#define MT6328_VCORE1_CON13 0x061a +#define MT6328_VCORE1_CON14 0x061c +#define MT6328_VCORE1_CON15 0x061e +#define MT6328_VCORE1_CON16 0x0620 +#define MT6328_VCORE1_CON17 0x0622 +#define MT6328_VCORE1_CON18 0x0624 +#define MT6328_VCORE1_CON19 0x0626 +#define MT6328_VSYS22_CON0 0x0628 +#define MT6328_VSYS22_CON1 0x062a +#define MT6328_VSYS22_CON2 0x062c +#define MT6328_VSYS22_CON3 0x062e +#define MT6328_VSYS22_CON4 0x0630 +#define MT6328_VSYS22_CON5 0x0632 +#define MT6328_VSYS22_CON6 0x0634 +#define MT6328_VSYS22_CON7 0x0636 +#define MT6328_VSYS22_CON8 0x0638 +#define MT6328_VSYS22_CON9 0x063a +#define MT6328_VSYS22_CON10 
0x063c +#define MT6328_VSYS22_CON11 0x063e +#define MT6328_VSYS22_CON12 0x0640 +#define MT6328_VSYS22_CON13 0x0642 +#define MT6328_VSYS22_CON14 0x0644 +#define MT6328_VSYS22_CON15 0x0646 +#define MT6328_VSYS22_CON16 0x0648 +#define MT6328_VSYS22_CON17 0x064a +#define MT6328_VSYS22_CON18 0x064c +#define MT6328_VSYS22_CON19 0x064e +#define MT6328_VPA_CON0 0x0650 +#define MT6328_VPA_CON1 0x0652 +#define MT6328_VPA_CON2 0x0654 +#define MT6328_VPA_CON3 0x0656 +#define MT6328_VPA_CON4 0x0658 +#define MT6328_VPA_CON5 0x065a +#define MT6328_VPA_CON6 0x065c +#define MT6328_VPA_CON7 0x065e +#define MT6328_VPA_CON8 0x0660 +#define MT6328_VPA_CON9 0x0662 +#define MT6328_VPA_CON10 0x0664 +#define MT6328_VPA_CON11 0x0666 +#define MT6328_VPA_CON12 0x0668 +#define MT6328_VPA_CON13 0x066a +#define MT6328_VPA_CON14 0x066c +#define MT6328_VPA_CON15 0x066e +#define MT6328_VPA_CON16 0x0670 +#define MT6328_VPA_CON17 0x0672 +#define MT6328_VPA_CON18 0x0674 +#define MT6328_VPA_CON19 0x0676 +#define MT6328_VPA_CON20 0x0678 +#define MT6328_VPA_CON21 0x067a +#define MT6328_VPA_CON22 0x067c +#define MT6328_VPA_CON23 0x067e +#define MT6328_VPA_CON24 0x0680 +#define MT6328_BUCK_K_CON0 0x0682 +#define MT6328_BUCK_K_CON1 0x0684 +#define MT6328_BUCK_K_CON2 0x0686 +#define MT6328_BUCK_K_CON3 0x0688 +#define MT6328_ZCD_CON0 0x0800 +#define MT6328_ZCD_CON1 0x0802 +#define MT6328_ZCD_CON2 0x0804 +#define MT6328_ZCD_CON3 0x0806 +#define MT6328_ZCD_CON4 0x0808 +#define MT6328_ZCD_CON5 0x080a +#define MT6328_ISINK0_CON0 0x080c +#define MT6328_ISINK0_CON1 0x080e +#define MT6328_ISINK0_CON2 0x0810 +#define MT6328_ISINK0_CON3 0x0812 +#define MT6328_ISINK1_CON0 0x0814 +#define MT6328_ISINK1_CON1 0x0816 +#define MT6328_ISINK1_CON2 0x0818 +#define MT6328_ISINK1_CON3 0x081a +#define MT6328_ISINK2_CON1 0x081c +#define MT6328_ISINK3_CON1 0x081e +#define MT6328_ISINK_ANA0 0x0820 +#define MT6328_ISINK_ANA1 0x0822 +#define MT6328_ISINK_PHASE_DLY 0x0824 +#define MT6328_ISINK_SFSTR 0x0826 +#define MT6328_ISINK_EN_CTRL 0x0828 +#define MT6328_ISINK_MODE_CTRL 0x082a +#define MT6328_VTCXO_0_CON0 0x0a00 +#define MT6328_VTCXO_1_CON0 0x0a02 +#define MT6328_VAUD28_CON0 0x0a04 +#define MT6328_VAUX18_CON0 0x0a06 +#define MT6328_VRF18_0_CON0 0x0a08 +#define MT6328_VRF18_0_CON1 0x0a0a +#define MT6328_VCAMA_CON0 0x0a0c +#define MT6328_VCN28_CON0 0x0a0e +#define MT6328_VCN33_CON0 0x0a10 +#define MT6328_VCN33_CON1 0x0a12 +#define MT6328_VCN33_CON2 0x0a14 +#define MT6328_VRF18_1_CON0 0x0a16 +#define MT6328_VRF18_1_CON1 0x0a18 +#define MT6328_VUSB33_CON0 0x0a1a +#define MT6328_VMCH_CON0 0x0a1c +#define MT6328_VMCH_CON1 0x0a1e +#define MT6328_VMC_CON0 0x0a20 +#define MT6328_VMC_CON1 0x0a22 +#define MT6328_VEMC_3V3_CON0 0x0a24 +#define MT6328_VEMC_3V3_CON1 0x0a26 +#define MT6328_VIO28_CON0 0x0a28 +#define MT6328_VCAMAF_CON0 0x0a2a +#define MT6328_VGP1_CON0 0x0a2c +#define MT6328_VGP1_CON1 0x0a2e +#define MT6328_VEFUSE_CON0 0x0a30 +#define MT6328_VSIM1_CON0 0x0a32 +#define MT6328_VSIM2_CON0 0x0a34 +#define MT6328_VIO18_CON0 0x0a36 +#define MT6328_VIBR_CON0 0x0a38 +#define MT6328_VCN18_CON0 0x0a3a +#define MT6328_VCAM_CON0 0x0a3c +#define MT6328_VCAMIO_CON0 0x0a3e +#define MT6328_LDO_VSRAM_CON0 0x0a40 +#define MT6328_LDO_VSRAM_CON1 0x0a42 +#define MT6328_VTREF_CON0 0x0a44 +#define MT6328_VM_CON0 0x0a46 +#define MT6328_VM_CON1 0x0a48 +#define MT6328_VRTC_CON0 0x0a4a +#define MT6328_LDO_OCFB0 0x0a4c +#define MT6328_ALDO_ANA_CON0 0x0a4e +#define MT6328_ADLDO_ANA_CON1 0x0a50 +#define MT6328_ADLDO_ANA_CON2 0x0a52 +#define MT6328_ADLDO_ANA_CON3 0x0a54 +#define 
MT6328_ADLDO_ANA_CON4 0x0a56 +#define MT6328_ADLDO_ANA_CON5 0x0a58 +#define MT6328_ADLDO_ANA_CON6 0x0a5a +#define MT6328_ADLDO_ANA_CON7 0x0a5c +#define MT6328_ADLDO_ANA_CON8 0x0a5e +#define MT6328_ADLDO_ANA_CON9 0x0a60 +#define MT6328_ADLDO_ANA_CON10 0x0a62 +#define MT6328_ADLDO_ANA_CON11 0x0a64 +#define MT6328_ADLDO_ANA_CON12 0x0a66 +#define MT6328_ADLDO_ANA_CON13 0x0a68 +#define MT6328_DLDO_ANA_CON0 0x0a6a +#define MT6328_DLDO_ANA_CON1 0x0a6c +#define MT6328_DLDO_ANA_CON2 0x0a6e +#define MT6328_DLDO_ANA_CON3 0x0a70 +#define MT6328_DLDO_ANA_CON4 0x0a72 +#define MT6328_DLDO_ANA_CON5 0x0a74 +#define MT6328_SLDO_ANA_CON0 0x0a76 +#define MT6328_SLDO_ANA_CON1 0x0a78 +#define MT6328_SLDO_ANA_CON2 0x0a7a +#define MT6328_SLDO_ANA_CON3 0x0a7c +#define MT6328_SLDO_ANA_CON4 0x0a7e +#define MT6328_SLDO_ANA_CON5 0x0a80 +#define MT6328_SLDO_ANA_CON6 0x0a82 +#define MT6328_SLDO_ANA_CON7 0x0a84 +#define MT6328_SLDO_ANA_CON8 0x0a86 +#define MT6328_SLDO_ANA_CON9 0x0a88 +#define MT6328_SLDO_ANA_CON10 0x0a8a +#define MT6328_LDO_RSV_CON0 0x0a8c +#define MT6328_LDO_RSV_CON1 0x0a8e +#define MT6328_SPK_CON0 0x0a90 +#define MT6328_SPK_CON1 0x0a92 +#define MT6328_SPK_CON2 0x0a94 +#define MT6328_SPK_CON3 0x0a96 +#define MT6328_SPK_CON4 0x0a98 +#define MT6328_SPK_CON5 0x0a9a +#define MT6328_SPK_CON6 0x0a9c +#define MT6328_SPK_CON7 0x0a9e +#define MT6328_SPK_CON8 0x0aa0 +#define MT6328_SPK_CON9 0x0aa2 +#define MT6328_SPK_CON10 0x0aa4 +#define MT6328_SPK_CON11 0x0aa6 +#define MT6328_SPK_CON12 0x0aa8 +#define MT6328_SPK_CON13 0x0aaa +#define MT6328_SPK_CON14 0x0aac +#define MT6328_SPK_CON15 0x0aae +#define MT6328_SPK_CON16 0x0ab0 +#define MT6328_SPK_ANA_CON0 0x0ab2 +#define MT6328_SPK_ANA_CON1 0x0ab4 +#define MT6328_SPK_ANA_CON3 0x0ab6 +#define MT6328_OTP_CON0 0x0c00 +#define MT6328_OTP_CON1 0x0c02 +#define MT6328_OTP_CON2 0x0c04 +#define MT6328_OTP_CON3 0x0c06 +#define MT6328_OTP_CON4 0x0c08 +#define MT6328_OTP_CON5 0x0c0a +#define MT6328_OTP_CON6 0x0c0c +#define MT6328_OTP_CON7 0x0c0e +#define MT6328_OTP_CON8 0x0c10 +#define MT6328_OTP_CON9 0x0c12 +#define MT6328_OTP_CON10 0x0c14 +#define MT6328_OTP_CON11 0x0c16 +#define MT6328_OTP_CON12 0x0c18 +#define MT6328_OTP_CON13 0x0c1a +#define MT6328_OTP_CON14 0x0c1c +#define MT6328_OTP_DOUT_0_15 0x0c1e +#define MT6328_OTP_DOUT_16_31 0x0c20 +#define MT6328_OTP_DOUT_32_47 0x0c22 +#define MT6328_OTP_DOUT_48_63 0x0c24 +#define MT6328_OTP_DOUT_64_79 0x0c26 +#define MT6328_OTP_DOUT_80_95 0x0c28 +#define MT6328_OTP_DOUT_96_111 0x0c2a +#define MT6328_OTP_DOUT_112_127 0x0c2c +#define MT6328_OTP_DOUT_128_143 0x0c2e +#define MT6328_OTP_DOUT_144_159 0x0c30 +#define MT6328_OTP_DOUT_160_175 0x0c32 +#define MT6328_OTP_DOUT_176_191 0x0c34 +#define MT6328_OTP_DOUT_192_207 0x0c36 +#define MT6328_OTP_DOUT_208_223 0x0c38 +#define MT6328_OTP_DOUT_224_239 0x0c3a +#define MT6328_OTP_DOUT_240_255 0x0c3c +#define MT6328_OTP_DOUT_256_271 0x0c3e +#define MT6328_OTP_DOUT_272_287 0x0c40 +#define MT6328_OTP_DOUT_288_303 0x0c42 +#define MT6328_OTP_DOUT_304_319 0x0c44 +#define MT6328_OTP_DOUT_320_335 0x0c46 +#define MT6328_OTP_DOUT_336_351 0x0c48 +#define MT6328_OTP_DOUT_352_367 0x0c4a +#define MT6328_OTP_DOUT_368_383 0x0c4c +#define MT6328_OTP_DOUT_384_399 0x0c4e +#define MT6328_OTP_DOUT_400_415 0x0c50 +#define MT6328_OTP_DOUT_416_431 0x0c52 +#define MT6328_OTP_DOUT_432_447 0x0c54 +#define MT6328_OTP_DOUT_448_463 0x0c56 +#define MT6328_OTP_DOUT_464_479 0x0c58 +#define MT6328_OTP_DOUT_480_495 0x0c5a +#define MT6328_OTP_DOUT_496_511 0x0c5c +#define MT6328_OTP_VAL_0_15 0x0c5e +#define MT6328_OTP_VAL_16_31 
0x0c60 +#define MT6328_OTP_VAL_32_47 0x0c62 +#define MT6328_OTP_VAL_48_63 0x0c64 +#define MT6328_OTP_VAL_64_79 0x0c66 +#define MT6328_OTP_VAL_80_95 0x0c68 +#define MT6328_OTP_VAL_96_111 0x0c6a +#define MT6328_OTP_VAL_112_127 0x0c6c +#define MT6328_OTP_VAL_128_143 0x0c6e +#define MT6328_OTP_VAL_144_159 0x0c70 +#define MT6328_OTP_VAL_160_175 0x0c72 +#define MT6328_OTP_VAL_176_191 0x0c74 +#define MT6328_OTP_VAL_192_207 0x0c76 +#define MT6328_OTP_VAL_208_223 0x0c78 +#define MT6328_OTP_VAL_224_239 0x0c7a +#define MT6328_OTP_VAL_240_255 0x0c7c +#define MT6328_OTP_VAL_256_271 0x0c7e +#define MT6328_OTP_VAL_272_287 0x0c80 +#define MT6328_OTP_VAL_288_303 0x0c82 +#define MT6328_OTP_VAL_304_319 0x0c84 +#define MT6328_OTP_VAL_320_335 0x0c86 +#define MT6328_OTP_VAL_336_351 0x0c88 +#define MT6328_OTP_VAL_352_367 0x0c8a +#define MT6328_OTP_VAL_368_383 0x0c8c +#define MT6328_OTP_VAL_384_399 0x0c8e +#define MT6328_OTP_VAL_400_415 0x0c90 +#define MT6328_OTP_VAL_416_431 0x0c92 +#define MT6328_OTP_VAL_432_447 0x0c94 +#define MT6328_OTP_VAL_448_463 0x0c96 +#define MT6328_OTP_VAL_464_479 0x0c98 +#define MT6328_OTP_VAL_480_495 0x0c9a +#define MT6328_OTP_VAL_496_511 0x0c9c +#define MT6328_RTC_MIX_CON0 0x0c9e +#define MT6328_RTC_MIX_CON1 0x0ca0 +#define MT6328_RTC_MIX_CON2 0x0ca2 +#define MT6328_FGADC_CON0 0x0ca4 +#define MT6328_FGADC_CON1 0x0ca6 +#define MT6328_FGADC_CON2 0x0ca8 +#define MT6328_FGADC_CON3 0x0caa +#define MT6328_FGADC_CON4 0x0cac +#define MT6328_FGADC_CON5 0x0cae +#define MT6328_FGADC_CON6 0x0cb0 +#define MT6328_FGADC_CON7 0x0cb2 +#define MT6328_FGADC_CON8 0x0cb4 +#define MT6328_FGADC_CON9 0x0cb6 +#define MT6328_FGADC_CON10 0x0cb8 +#define MT6328_FGADC_CON11 0x0cba +#define MT6328_FGADC_CON12 0x0cbc +#define MT6328_FGADC_CON13 0x0cbe +#define MT6328_FGADC_CON14 0x0cc0 +#define MT6328_FGADC_CON15 0x0cc2 +#define MT6328_FGADC_CON16 0x0cc4 +#define MT6328_FGADC_CON17 0x0cc6 +#define MT6328_FGADC_CON18 0x0cc8 +#define MT6328_FGADC_CON19 0x0cca +#define MT6328_FGADC_CON20 0x0ccc +#define MT6328_FGADC_CON21 0x0cce +#define MT6328_FGADC_CON22 0x0cd0 +#define MT6328_FGADC_CON23 0x0cd2 +#define MT6328_FGADC_CON24 0x0cd4 +#define MT6328_FGADC_CON25 0x0cd6 +#define MT6328_FGADC_CON26 0x0cd8 +#define MT6328_FGADC_CON27 0x0cda +#define MT6328_AUDDEC_ANA_CON0 0x0cdc +#define MT6328_AUDDEC_ANA_CON1 0x0cde +#define MT6328_AUDDEC_ANA_CON2 0x0ce0 +#define MT6328_AUDDEC_ANA_CON3 0x0ce2 +#define MT6328_AUDDEC_ANA_CON4 0x0ce4 +#define MT6328_AUDDEC_ANA_CON5 0x0ce6 +#define MT6328_AUDDEC_ANA_CON6 0x0ce8 +#define MT6328_AUDDEC_ANA_CON7 0x0cea +#define MT6328_AUDDEC_ANA_CON8 0x0cec +#define MT6328_AUDENC_ANA_CON0 0x0cee +#define MT6328_AUDENC_ANA_CON1 0x0cf0 +#define MT6328_AUDENC_ANA_CON2 0x0cf2 +#define MT6328_AUDENC_ANA_CON3 0x0cf4 +#define MT6328_AUDENC_ANA_CON4 0x0cf6 +#define MT6328_AUDENC_ANA_CON5 0x0cf8 +#define MT6328_AUDENC_ANA_CON6 0x0cfa +#define MT6328_AUDENC_ANA_CON7 0x0cfc +#define MT6328_AUDENC_ANA_CON8 0x0cfe +#define MT6328_AUDENC_ANA_CON9 0x0d00 +#define MT6328_AUDENC_ANA_CON10 0x0d02 +#define MT6328_AUDNCP_CLKDIV_CON0 0x0d04 +#define MT6328_AUDNCP_CLKDIV_CON1 0x0d06 +#define MT6328_AUDNCP_CLKDIV_CON2 0x0d08 +#define MT6328_AUDNCP_CLKDIV_CON3 0x0d0a +#define MT6328_AUDNCP_CLKDIV_CON4 0x0d0c +#define MT6328_AUXADC_ADC0 0x0e00 +#define MT6328_AUXADC_ADC1 0x0e02 +#define MT6328_AUXADC_ADC2 0x0e04 +#define MT6328_AUXADC_ADC3 0x0e06 +#define MT6328_AUXADC_ADC4 0x0e08 +#define MT6328_AUXADC_ADC5 0x0e0a +#define MT6328_AUXADC_ADC6 0x0e0c +#define MT6328_AUXADC_ADC7 0x0e0e +#define MT6328_AUXADC_ADC8 0x0e10 
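The MT6328, like other MediaTek PMICs, pairs many of its registers with _SET and _CLR companions at adjacent offsets (see MT6328_AUXADC_RQST0_SET/_CLR just below): writing a mask to the _SET or _CLR address sets or clears those bits without a read-modify-write cycle on the base register. Below is a minimal sketch of how a sub-driver might use these AUXADC offsets through the MFD core's regmap; the helper name, the channel-request bit, and the data-ready layout (bit 15 flags completion, as on other MediaTek PMICs) are illustrative assumptions — only the register offsets come from this header:

#include <linux/bits.h>
#include <linux/regmap.h>

/* Hypothetical request bit for AUXADC channel 0 (not from this header). */
#define MT6328_AUXADC_RQST_CH0	BIT(0)

static int mt6328_auxadc_read_ch0(struct regmap *regmap, u16 *val)
{
	unsigned int raw;
	int ret;

	/* Kick off a conversion; the _SET register sets bits atomically. */
	ret = regmap_write(regmap, MT6328_AUXADC_RQST0_SET,
			   MT6328_AUXADC_RQST_CH0);
	if (ret)
		return ret;

	/* Assumed layout: bit 15 = data ready, bits 14:0 = sample value. */
	ret = regmap_read_poll_timeout(regmap, MT6328_AUXADC_ADC0, raw,
				       raw & BIT(15), 100, 10000);
	if (ret)
		return ret;

	*val = raw & GENMASK(14, 0);
	return 0;
}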
+#define MT6328_AUXADC_ADC9 0x0e12 +#define MT6328_AUXADC_ADC10 0x0e14 +#define MT6328_AUXADC_ADC11 0x0e16 +#define MT6328_AUXADC_ADC12 0x0e18 +#define MT6328_AUXADC_ADC13 0x0e1a +#define MT6328_AUXADC_ADC14 0x0e1c +#define MT6328_AUXADC_ADC15 0x0e1e +#define MT6328_AUXADC_ADC16 0x0e20 +#define MT6328_AUXADC_ADC17 0x0e22 +#define MT6328_AUXADC_ADC18 0x0e24 +#define MT6328_AUXADC_ADC19 0x0e26 +#define MT6328_AUXADC_ADC20 0x0e28 +#define MT6328_AUXADC_ADC21 0x0e2a +#define MT6328_AUXADC_ADC22 0x0e2c +#define MT6328_AUXADC_ADC23 0x0e2e +#define MT6328_AUXADC_ADC24 0x0e30 +#define MT6328_AUXADC_ADC25 0x0e32 +#define MT6328_AUXADC_ADC26 0x0e34 +#define MT6328_AUXADC_ADC27 0x0e36 +#define MT6328_AUXADC_ADC28 0x0e38 +#define MT6328_AUXADC_ADC29 0x0e3a +#define MT6328_AUXADC_ADC30 0x0e3c +#define MT6328_AUXADC_ADC31 0x0e3e +#define MT6328_AUXADC_ADC32 0x0e40 +#define MT6328_AUXADC_ADC33 0x0e42 +#define MT6328_AUXADC_BUF0 0x0e44 +#define MT6328_AUXADC_BUF1 0x0e46 +#define MT6328_AUXADC_BUF2 0x0e48 +#define MT6328_AUXADC_BUF3 0x0e4a +#define MT6328_AUXADC_BUF4 0x0e4c +#define MT6328_AUXADC_BUF5 0x0e4e +#define MT6328_AUXADC_BUF6 0x0e50 +#define MT6328_AUXADC_BUF7 0x0e52 +#define MT6328_AUXADC_BUF8 0x0e54 +#define MT6328_AUXADC_BUF9 0x0e56 +#define MT6328_AUXADC_BUF10 0x0e58 +#define MT6328_AUXADC_BUF11 0x0e5a +#define MT6328_AUXADC_BUF12 0x0e5c +#define MT6328_AUXADC_BUF13 0x0e5e +#define MT6328_AUXADC_BUF14 0x0e60 +#define MT6328_AUXADC_BUF15 0x0e62 +#define MT6328_AUXADC_BUF16 0x0e64 +#define MT6328_AUXADC_BUF17 0x0e66 +#define MT6328_AUXADC_BUF18 0x0e68 +#define MT6328_AUXADC_BUF19 0x0e6a +#define MT6328_AUXADC_BUF20 0x0e6c +#define MT6328_AUXADC_BUF21 0x0e6e +#define MT6328_AUXADC_BUF22 0x0e70 +#define MT6328_AUXADC_BUF23 0x0e72 +#define MT6328_AUXADC_BUF24 0x0e74 +#define MT6328_AUXADC_BUF25 0x0e76 +#define MT6328_AUXADC_BUF26 0x0e78 +#define MT6328_AUXADC_BUF27 0x0e7a +#define MT6328_AUXADC_BUF28 0x0e7c +#define MT6328_AUXADC_BUF29 0x0e7e +#define MT6328_AUXADC_BUF30 0x0e80 +#define MT6328_AUXADC_BUF31 0x0e82 +#define MT6328_AUXADC_STA0 0x0e84 +#define MT6328_AUXADC_STA1 0x0e86 +#define MT6328_AUXADC_RQST0 0x0e88 +#define MT6328_AUXADC_RQST0_SET 0x0e8a +#define MT6328_AUXADC_RQST0_CLR 0x0e8c +#define MT6328_AUXADC_RQST1 0x0e8e +#define MT6328_AUXADC_RQST1_SET 0x0e90 +#define MT6328_AUXADC_RQST1_CLR 0x0e92 +#define MT6328_AUXADC_CON0 0x0e94 +#define MT6328_AUXADC_CON0_SET 0x0e96 +#define MT6328_AUXADC_CON0_CLR 0x0e98 +#define MT6328_AUXADC_CON1 0x0e9a +#define MT6328_AUXADC_CON2 0x0e9c +#define MT6328_AUXADC_CON3 0x0e9e +#define MT6328_AUXADC_CON4 0x0ea0 +#define MT6328_AUXADC_CON5 0x0ea2 +#define MT6328_AUXADC_CON6 0x0ea4 +#define MT6328_AUXADC_CON7 0x0ea6 +#define MT6328_AUXADC_CON8 0x0ea8 +#define MT6328_AUXADC_CON9 0x0eaa +#define MT6328_AUXADC_CON10 0x0eac +#define MT6328_AUXADC_CON11 0x0eae +#define MT6328_AUXADC_CON12 0x0eb0 +#define MT6328_AUXADC_CON13 0x0eb2 +#define MT6328_AUXADC_CON14 0x0eb4 +#define MT6328_AUXADC_CON15 0x0eb6 +#define MT6328_AUXADC_CON16 0x0eb8 +#define MT6328_AUXADC_AUTORPT0 0x0eba +#define MT6328_AUXADC_LBAT0 0x0ebc +#define MT6328_AUXADC_LBAT1 0x0ebe +#define MT6328_AUXADC_LBAT2 0x0ec0 +#define MT6328_AUXADC_LBAT3 0x0ec2 +#define MT6328_AUXADC_LBAT4 0x0ec4 +#define MT6328_AUXADC_LBAT5 0x0ec6 +#define MT6328_AUXADC_LBAT6 0x0ec8 +#define MT6328_AUXADC_ACCDET 0x0eca +#define MT6328_AUXADC_THR0 0x0ecc +#define MT6328_AUXADC_THR1 0x0ece +#define MT6328_AUXADC_THR2 0x0ed0 +#define MT6328_AUXADC_THR3 0x0ed2 +#define MT6328_AUXADC_THR4 0x0ed4 +#define MT6328_AUXADC_THR5 
0x0ed6 +#define MT6328_AUXADC_THR6 0x0ed8 +#define MT6328_AUXADC_EFUSE0 0x0eda +#define MT6328_AUXADC_EFUSE1 0x0edc +#define MT6328_AUXADC_EFUSE2 0x0ede +#define MT6328_AUXADC_EFUSE3 0x0ee0 +#define MT6328_AUXADC_EFUSE4 0x0ee2 +#define MT6328_AUXADC_EFUSE5 0x0ee4 +#define MT6328_AUXADC_DBG0 0x0ee6 +#define MT6328_AUXADC_IMP0 0x0ee8 +#define MT6328_AUXADC_IMP1 0x0eea +#define MT6328_AUXADC_VISMPS0_1 0x0eec +#define MT6328_AUXADC_VISMPS0_2 0x0eee +#define MT6328_AUXADC_VISMPS0_3 0x0ef0 +#define MT6328_AUXADC_VISMPS0_4 0x0ef2 +#define MT6328_AUXADC_VISMPS0_5 0x0ef4 +#define MT6328_AUXADC_VISMPS0_6 0x0ef6 +#define MT6328_AUXADC_VISMPS0_7 0x0ef8 +#define MT6328_AUXADC_LBAT2_1 0x0efa +#define MT6328_AUXADC_LBAT2_2 0x0efc +#define MT6328_AUXADC_LBAT2_3 0x0efe +#define MT6328_AUXADC_LBAT2_4 0x0f00 +#define MT6328_AUXADC_LBAT2_5 0x0f02 +#define MT6328_AUXADC_LBAT2_6 0x0f04 +#define MT6328_AUXADC_LBAT2_7 0x0f06 +#define MT6328_AUXADC_MDBG_0 0x0f08 +#define MT6328_AUXADC_MDBG_1 0x0f0a +#define MT6328_AUXADC_MDBG_2 0x0f0c +#define MT6328_AUXADC_MDRT_0 0x0f0e +#define MT6328_AUXADC_MDRT_1 0x0f10 +#define MT6328_AUXADC_MDRT_2 0x0f12 +#define MT6328_ACCDET_CON0 0x0f14 +#define MT6328_ACCDET_CON1 0x0f16 +#define MT6328_ACCDET_CON2 0x0f18 +#define MT6328_ACCDET_CON3 0x0f1a +#define MT6328_ACCDET_CON4 0x0f1c +#define MT6328_ACCDET_CON5 0x0f1e +#define MT6328_ACCDET_CON6 0x0f20 +#define MT6328_ACCDET_CON7 0x0f22 +#define MT6328_ACCDET_CON8 0x0f24 +#define MT6328_ACCDET_CON9 0x0f26 +#define MT6328_ACCDET_CON10 0x0f28 +#define MT6328_ACCDET_CON11 0x0f2a +#define MT6328_ACCDET_CON12 0x0f2c +#define MT6328_ACCDET_CON13 0x0f2e +#define MT6328_ACCDET_CON14 0x0f30 +#define MT6328_ACCDET_CON15 0x0f32 +#define MT6328_ACCDET_CON16 0x0f34 +#define MT6328_ACCDET_CON17 0x0f36 +#define MT6328_ACCDET_CON18 0x0f38 +#define MT6328_ACCDET_CON19 0x0f3a +#define MT6328_ACCDET_CON20 0x0f3c +#define MT6328_ACCDET_CON21 0x0f3e +#define MT6328_ACCDET_CON22 0x0f40 +#define MT6328_ACCDET_CON23 0x0f42 +#define MT6328_ACCDET_CON24 0x0f44 +#define MT6328_ACCDET_CON25 0x0f46 +#define MT6328_CHR_CON0 0x0f48 +#define MT6328_CHR_CON1 0x0f4a +#define MT6328_CHR_CON2 0x0f4c +#define MT6328_CHR_CON3 0x0f4e +#define MT6328_CHR_CON4 0x0f50 +#define MT6328_CHR_CON5 0x0f52 +#define MT6328_CHR_CON6 0x0f54 +#define MT6328_CHR_CON7 0x0f56 +#define MT6328_CHR_CON8 0x0f58 +#define MT6328_CHR_CON9 0x0f5a +#define MT6328_CHR_CON10 0x0f5c +#define MT6328_CHR_CON11 0x0f5e +#define MT6328_CHR_CON12 0x0f60 +#define MT6328_CHR_CON13 0x0f62 +#define MT6328_CHR_CON14 0x0f64 +#define MT6328_CHR_CON15 0x0f66 +#define MT6328_CHR_CON16 0x0f68 +#define MT6328_CHR_CON17 0x0f6a +#define MT6328_CHR_CON18 0x0f6c +#define MT6328_CHR_CON19 0x0f6e +#define MT6328_CHR_CON20 0x0f70 +#define MT6328_CHR_CON21 0x0f72 +#define MT6328_CHR_CON22 0x0f74 +#define MT6328_CHR_CON23 0x0f76 +#define MT6328_CHR_CON24 0x0f78 +#define MT6328_CHR_CON25 0x0f7a +#define MT6328_CHR_CON26 0x0f7c +#define MT6328_CHR_CON27 0x0f7e +#define MT6328_CHR_CON28 0x0f80 +#define MT6328_CHR_CON29 0x0f82 +#define MT6328_CHR_CON30 0x0f84 +#define MT6328_CHR_CON31 0x0f86 +#define MT6328_CHR_CON32 0x0f88 +#define MT6328_CHR_CON33 0x0f8a +#define MT6328_CHR_CON34 0x0f8c +#define MT6328_CHR_CON35 0x0f8e +#define MT6328_CHR_CON36 0x0f90 +#define MT6328_CHR_CON37 0x0f92 +#define MT6328_CHR_CON38 0x0f94 +#define MT6328_CHR_CON39 0x0f96 +#define MT6328_CHR_CON40 0x0f98 +#define MT6328_CHR_CON41 0x0f9a +#define MT6328_CHR_CON42 0x0f9c +#define MT6328_BATON_CON0 0x0f9e +#define MT6328_CHR_CON43 0x0fa0 +#define 
MT6328_EOSC_CALI_CON0 0x0faa +#define MT6328_EOSC_CALI_CON1 0x0fac +#define MT6328_VRTC_PWM_CON0 0x0fae + +#endif /* __MFD_MT6328_REGISTERS_H__ */ diff --git a/include/linux/mfd/mt6397/core.h b/include/linux/mfd/mt6397/core.h index 627487e26287..b774c3a4bb62 100644 --- a/include/linux/mfd/mt6397/core.h +++ b/include/linux/mfd/mt6397/core.h @@ -12,6 +12,7 @@ enum chip_id { MT6323_CHIP_ID = 0x23, + MT6328_CHIP_ID = 0x30, MT6331_CHIP_ID = 0x20, MT6332_CHIP_ID = 0x20, MT6357_CHIP_ID = 0x57, @@ -65,11 +66,11 @@ struct mt6397_chip { int irq; struct irq_domain *irq_domain; struct mutex irqlock; - u16 wake_mask[2]; - u16 irq_masks_cur[2]; - u16 irq_masks_cache[2]; - u16 int_con[2]; - u16 int_status[2]; + u16 wake_mask[3]; + u16 irq_masks_cur[3]; + u16 irq_masks_cache[3]; + u16 int_con[3]; + u16 int_status[3]; u16 chip_id; void *irq_data; }; diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index eda1ffd99c1a..dabcc0dea802 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -98,8 +98,8 @@ struct palmas_sleep_requestor_info { }; struct palmas_regs_info { - char *name; - char *sname; + const char *name; + const char *sname; u8 vsel_addr; u8 ctrl_addr; u8 tstep_addr; diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h index a212b9f72bc9..750274d41fc0 100644 --- a/include/linux/mfd/samsung/core.h +++ b/include/linux/mfd/samsung/core.h @@ -37,6 +37,7 @@ struct gpio_desc; enum sec_device_type { S5M8767X, + S2DOS05, S2MPA01, S2MPS11X, S2MPS13X, diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h index 43a7b9dcf15e..e781727c8916 100644 --- a/include/linux/min_heap.h +++ b/include/linux/min_heap.h @@ -38,9 +38,176 @@ struct min_heap_callbacks { void (*swp)(void *lhs, void *rhs, void *args); }; +/** + * is_aligned - is this pointer & size okay for word-wide copying? + * @base: pointer to data + * @size: size of each element + * @align: required alignment (typically 4 or 8) + * + * Returns true if elements can be copied using word loads and stores. + * The size must be a multiple of the alignment, and the base address must + * be if we do not have CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS. + * + * For some reason, gcc doesn't know to optimize "if (a & mask || b & mask)" + * to "if ((a | b) & mask)", so we do that by hand. + */ +__attribute_const__ __always_inline +static bool is_aligned(const void *base, size_t size, unsigned char align) +{ + unsigned char lsbits = (unsigned char)size; + + (void)base; +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + lsbits |= (unsigned char)(uintptr_t)base; +#endif + return (lsbits & (align - 1)) == 0; +} + +/** + * swap_words_32 - swap two elements in 32-bit chunks + * @a: pointer to the first element to swap + * @b: pointer to the second element to swap + * @n: element size (must be a multiple of 4) + * + * Exchange the two objects in memory. This exploits base+index addressing, + * which basically all CPUs have, to minimize loop overhead computations. + * + * For some reason, on x86 gcc 7.3.0 adds a redundant test of n at the + * bottom of the loop, even though the zero flag is still valid from the + * subtract (since the intervening mov instructions don't alter the flags). + * Gcc 8.1.0 doesn't have that problem. 
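+ *
+ * For example, n == 12 performs exactly three 32-bit exchanges, at byte
+ * offsets 8, 4 and 0.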
+ */ +static __always_inline +void swap_words_32(void *a, void *b, size_t n) +{ + do { + u32 t = *(u32 *)(a + (n -= 4)); + *(u32 *)(a + n) = *(u32 *)(b + n); + *(u32 *)(b + n) = t; + } while (n); +} + +/** + * swap_words_64 - swap two elements in 64-bit chunks + * @a: pointer to the first element to swap + * @b: pointer to the second element to swap + * @n: element size (must be a multiple of 8) + * + * Exchange the two objects in memory. This exploits base+index + * addressing, which basically all CPUs have, to minimize loop overhead + * computations. + * + * We'd like to use 64-bit loads if possible. If they're not, emulating + * one requires base+index+4 addressing which x86 has but most other + * processors do not. If CONFIG_64BIT, we definitely have 64-bit loads, + * but it's possible to have 64-bit loads without 64-bit pointers (e.g. + * x32 ABI). Are there any cases the kernel needs to worry about? + */ +static __always_inline +void swap_words_64(void *a, void *b, size_t n) +{ + do { +#ifdef CONFIG_64BIT + u64 t = *(u64 *)(a + (n -= 8)); + *(u64 *)(a + n) = *(u64 *)(b + n); + *(u64 *)(b + n) = t; +#else + /* Use two 32-bit transfers to avoid base+index+4 addressing */ + u32 t = *(u32 *)(a + (n -= 4)); + *(u32 *)(a + n) = *(u32 *)(b + n); + *(u32 *)(b + n) = t; + + t = *(u32 *)(a + (n -= 4)); + *(u32 *)(a + n) = *(u32 *)(b + n); + *(u32 *)(b + n) = t; +#endif + } while (n); +} + +/** + * swap_bytes - swap two elements a byte at a time + * @a: pointer to the first element to swap + * @b: pointer to the second element to swap + * @n: element size + * + * This is the fallback if alignment doesn't allow using larger chunks. + */ +static __always_inline +void swap_bytes(void *a, void *b, size_t n) +{ + do { + char t = ((char *)a)[--n]; + ((char *)a)[n] = ((char *)b)[n]; + ((char *)b)[n] = t; + } while (n); +} + +/* + * The values are arbitrary as long as they can't be confused with + * a pointer, but small integers make for the smallest compare + * instructions. + */ +#define SWAP_WORDS_64 ((void (*)(void *, void *, void *))0) +#define SWAP_WORDS_32 ((void (*)(void *, void *, void *))1) +#define SWAP_BYTES ((void (*)(void *, void *, void *))2) + +/* + * Selects the appropriate swap function based on the element size. + */ +static __always_inline +void *select_swap_func(const void *base, size_t size) +{ + if (is_aligned(base, size, 8)) + return SWAP_WORDS_64; + else if (is_aligned(base, size, 4)) + return SWAP_WORDS_32; + else + return SWAP_BYTES; +} + +static __always_inline +void do_swap(void *a, void *b, size_t size, void (*swap_func)(void *lhs, void *rhs, void *args), + void *priv) +{ + if (swap_func == SWAP_WORDS_64) + swap_words_64(a, b, size); + else if (swap_func == SWAP_WORDS_32) + swap_words_32(a, b, size); + else if (swap_func == SWAP_BYTES) + swap_bytes(a, b, size); + else + swap_func(a, b, priv); +} + +/** + * parent - given the offset of the child, find the offset of the parent. + * @i: the offset of the heap element whose parent is sought. Non-zero. + * @lsbit: a precomputed 1-bit mask, equal to "size & -size" + * @size: size of each element + * + * In terms of array indexes, the parent of element j = @i/@size is simply + * (j-1)/2. But when working in byte offsets, we can't use implicit + * truncation of integer divides. + * + * Fortunately, we only need one bit of the quotient, not the full divide. + * @size has a least significant bit. That bit will be clear if @i is + * an even multiple of @size, and set if it's an odd multiple. 
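+ *
+ * For example, with @size == 12 the @lsbit is 4. For the child at byte
+ * offset @i == 36 (array index 3), i -= size leaves 24; 24 & 4 == 0, so
+ * nothing more is subtracted and the result is 24 / 2 == 12, i.e. index
+ * 1 == (3 - 1) / 2. For @i == 24 (index 2), i -= size leaves 12;
+ * 12 & 4 != 0, so @size is subtracted once more, giving 0 / 2 == 0
+ * == (2 - 1) / 2.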
+ * + * Logically, we're doing "if (i & lsbit) i -= size;", but since the + * branch is unpredictable, it's done with a bit of clever branch-free + * code instead. + */ +__attribute_const__ __always_inline +static size_t parent(size_t i, unsigned int lsbit, size_t size) +{ + i -= size; + i -= size & -(i & lsbit); + return i / 2; +} + /* Initialize a min-heap. */ static __always_inline -void __min_heap_init(min_heap_char *heap, void *data, int size) +void __min_heap_init_inline(min_heap_char *heap, void *data, int size) { heap->nr = 0; heap->size = size; @@ -50,105 +217,114 @@ void __min_heap_init(min_heap_char *heap, void *data, int size) heap->data = heap->preallocated; } -#define min_heap_init(_heap, _data, _size) \ - __min_heap_init((min_heap_char *)_heap, _data, _size) +#define min_heap_init_inline(_heap, _data, _size) \ + __min_heap_init_inline((min_heap_char *)_heap, _data, _size) /* Get the minimum element from the heap. */ static __always_inline -void *__min_heap_peek(struct min_heap_char *heap) +void *__min_heap_peek_inline(struct min_heap_char *heap) { return heap->nr ? heap->data : NULL; } -#define min_heap_peek(_heap) \ - (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap)) +#define min_heap_peek_inline(_heap) \ + (__minheap_cast(_heap) __min_heap_peek_inline((min_heap_char *)_heap)) /* Check if the heap is full. */ static __always_inline -bool __min_heap_full(min_heap_char *heap) +bool __min_heap_full_inline(min_heap_char *heap) { return heap->nr == heap->size; } -#define min_heap_full(_heap) \ - __min_heap_full((min_heap_char *)_heap) +#define min_heap_full_inline(_heap) \ + __min_heap_full_inline((min_heap_char *)_heap) /* Sift the element at pos down the heap. */ static __always_inline -void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size, - const struct min_heap_callbacks *func, void *args) +void __min_heap_sift_down_inline(min_heap_char *heap, int pos, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { - void *left, *right; + const unsigned long lsbit = elem_size & -elem_size; void *data = heap->data; - void *root = data + pos * elem_size; - int i = pos, j; + void (*swp)(void *lhs, void *rhs, void *args) = func->swp; + /* pre-scale counters for performance */ + size_t a = pos * elem_size; + size_t b, c, d; + size_t n = heap->nr * elem_size; + + if (!swp) + swp = select_swap_func(data, elem_size); /* Find the sift-down path all the way to the leaves. */ - for (;;) { - if (i * 2 + 2 >= heap->nr) - break; - left = data + (i * 2 + 1) * elem_size; - right = data + (i * 2 + 2) * elem_size; - i = func->less(left, right, args) ? i * 2 + 1 : i * 2 + 2; - } + for (b = a; c = 2 * b + elem_size, (d = c + elem_size) < n;) + b = func->less(data + c, data + d, args) ? c : d; /* Special case for the last leaf with no sibling. */ - if (i * 2 + 2 == heap->nr) - i = i * 2 + 1; + if (d == n) + b = c; /* Backtrack to the correct location. */ - while (i != pos && func->less(root, data + i * elem_size, args)) - i = (i - 1) / 2; + while (b != a && func->less(data + a, data + b, args)) + b = parent(b, lsbit, elem_size); /* Shift the element into its correct place. 
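 * Swapping the bottom of the path with each successive ancestor moves
 * every element on the path up one level and finally lands the element
 * from position a in the vacated bottom position c.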
*/ - j = i; - while (i != pos) { - i = (i - 1) / 2; - func->swp(data + i * elem_size, data + j * elem_size, args); + c = b; + while (b != a) { + b = parent(b, lsbit, elem_size); + do_swap(data + b, data + c, elem_size, swp, args); } } -#define min_heap_sift_down(_heap, _pos, _func, _args) \ - __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args) +#define min_heap_sift_down_inline(_heap, _pos, _func, _args) \ + __min_heap_sift_down_inline((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), \ + _func, _args) /* Sift up ith element from the heap, O(log2(nr)). */ static __always_inline -void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx, - const struct min_heap_callbacks *func, void *args) +void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx, + const struct min_heap_callbacks *func, void *args) { + const unsigned long lsbit = elem_size & -elem_size; void *data = heap->data; - size_t parent; + void (*swp)(void *lhs, void *rhs, void *args) = func->swp; + /* pre-scale counters for performance */ + size_t a = idx * elem_size, b; + + if (!swp) + swp = select_swap_func(data, elem_size); - while (idx) { - parent = (idx - 1) / 2; - if (func->less(data + parent * elem_size, data + idx * elem_size, args)) + while (a) { + b = parent(a, lsbit, elem_size); + if (func->less(data + b, data + a, args)) break; - func->swp(data + parent * elem_size, data + idx * elem_size, args); - idx = parent; + do_swap(data + a, data + b, elem_size, swp, args); + a = b; } } -#define min_heap_sift_up(_heap, _idx, _func, _args) \ - __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) +#define min_heap_sift_up_inline(_heap, _idx, _func, _args) \ + __min_heap_sift_up_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ + _func, _args) /* Floyd's approach to heapification that is O(nr). */ static __always_inline -void __min_heapify_all(min_heap_char *heap, size_t elem_size, - const struct min_heap_callbacks *func, void *args) +void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { int i; for (i = heap->nr / 2 - 1; i >= 0; i--) - __min_heap_sift_down(heap, i, elem_size, func, args); + __min_heap_sift_down_inline(heap, i, elem_size, func, args); } -#define min_heapify_all(_heap, _func, _args) \ - __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) +#define min_heapify_all_inline(_heap, _func, _args) \ + __min_heapify_all_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) /* Remove minimum element from the heap, O(log2(nr)). */ static __always_inline -bool __min_heap_pop(min_heap_char *heap, size_t elem_size, - const struct min_heap_callbacks *func, void *args) +bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { void *data = heap->data; @@ -158,13 +334,13 @@ bool __min_heap_pop(min_heap_char *heap, size_t elem_size, /* Place last element at the root (position 0) and then sift down. 
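 * (the element moved up to the root can only violate the heap property
 * towards the leaves, so a single sift-down restores the heap)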
*/ heap->nr--; memcpy(data, data + (heap->nr * elem_size), elem_size); - __min_heap_sift_down(heap, 0, elem_size, func, args); + __min_heap_sift_down_inline(heap, 0, elem_size, func, args); return true; } -#define min_heap_pop(_heap, _func, _args) \ - __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) +#define min_heap_pop_inline(_heap, _func, _args) \ + __min_heap_pop_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) /* * Remove the minimum element and then push the given element. The @@ -172,22 +348,21 @@ bool __min_heap_pop(min_heap_char *heap, size_t elem_size, * efficient than a pop followed by a push that does 2. */ static __always_inline -void __min_heap_pop_push(min_heap_char *heap, - const void *element, size_t elem_size, - const struct min_heap_callbacks *func, - void *args) +void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { memcpy(heap->data, element, elem_size); - __min_heap_sift_down(heap, 0, elem_size, func, args); + __min_heap_sift_down_inline(heap, 0, elem_size, func, args); } -#define min_heap_pop_push(_heap, _element, _func, _args) \ - __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) +#define min_heap_pop_push_inline(_heap, _element, _func, _args) \ + __min_heap_pop_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ + _func, _args) /* Push an element on to the heap, O(log2(nr)). */ static __always_inline -bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size, - const struct min_heap_callbacks *func, void *args) +bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t elem_size, + const struct min_heap_callbacks *func, void *args) { void *data = heap->data; int pos; @@ -201,35 +376,81 @@ bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size, heap->nr++; /* Sift child at pos up. */ - __min_heap_sift_up(heap, elem_size, pos, func, args); + __min_heap_sift_up_inline(heap, elem_size, pos, func, args); return true; } -#define min_heap_push(_heap, _element, _func, _args) \ - __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) +#define min_heap_push_inline(_heap, _element, _func, _args) \ + __min_heap_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ + _func, _args) /* Remove ith element from the heap, O(log2(nr)). */ static __always_inline -bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx, - const struct min_heap_callbacks *func, void *args) +bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx, + const struct min_heap_callbacks *func, void *args) { void *data = heap->data; + void (*swp)(void *lhs, void *rhs, void *args) = func->swp; if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap")) return false; + if (!swp) + swp = select_swap_func(data, elem_size); + /* Place last element at the root (position 0) and then sift down. 
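 * In this variant the last element actually replaces the element at idx
 * rather than the root, so both a sift-up and a sift-down from idx may
 * be needed to restore the heap.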
*/ heap->nr--; if (idx == heap->nr) return true; - func->swp(data + (idx * elem_size), data + (heap->nr * elem_size), args); - __min_heap_sift_up(heap, elem_size, idx, func, args); - __min_heap_sift_down(heap, idx, elem_size, func, args); + do_swap(data + (idx * elem_size), data + (heap->nr * elem_size), elem_size, swp, args); + __min_heap_sift_up_inline(heap, elem_size, idx, func, args); + __min_heap_sift_down_inline(heap, idx, elem_size, func, args); return true; } +#define min_heap_del_inline(_heap, _idx, _func, _args) \ + __min_heap_del_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ + _func, _args) + +void __min_heap_init(min_heap_char *heap, void *data, int size); +void *__min_heap_peek(struct min_heap_char *heap); +bool __min_heap_full(min_heap_char *heap); +void __min_heap_sift_down(min_heap_char *heap, int pos, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +void __min_heap_sift_up(min_heap_char *heap, size_t elem_size, size_t idx, + const struct min_heap_callbacks *func, void *args); +void __min_heapify_all(min_heap_char *heap, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +bool __min_heap_pop(min_heap_char *heap, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +void __min_heap_pop_push(min_heap_char *heap, const void *element, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +bool __min_heap_push(min_heap_char *heap, const void *element, size_t elem_size, + const struct min_heap_callbacks *func, void *args); +bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx, + const struct min_heap_callbacks *func, void *args); + +#define min_heap_init(_heap, _data, _size) \ + __min_heap_init((min_heap_char *)_heap, _data, _size) +#define min_heap_peek(_heap) \ + (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap)) +#define min_heap_full(_heap) \ + __min_heap_full((min_heap_char *)_heap) +#define min_heap_sift_down(_heap, _pos, _func, _args) \ + __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args) +#define min_heap_sift_up(_heap, _idx, _func, _args) \ + __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) +#define min_heapify_all(_heap, _func, _args) \ + __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) +#define min_heap_pop(_heap, _func, _args) \ + __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) +#define min_heap_pop_push(_heap, _element, _func, _args) \ + __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ + _func, _args) +#define min_heap_push(_heap, _element, _func, _args) \ + __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) #define min_heap_del(_heap, _idx, _func, _args) \ __min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) diff --git a/include/linux/misc/keba.h b/include/linux/misc/keba.h index 323b31a847c5..a81d6fa70851 100644 --- a/include/linux/misc/keba.h +++ b/include/linux/misc/keba.h @@ -7,6 +7,7 @@ #include <linux/auxiliary_bus.h> struct i2c_board_info; +struct spi_board_info; /** * struct keba_i2c_auxdev - KEBA I2C auxiliary device @@ -22,4 +23,50 @@ struct keba_i2c_auxdev { struct i2c_board_info *info; }; +/** + * struct keba_spi_auxdev - KEBA SPI auxiliary device + * @auxdev: auxiliary device object + * @io: address range of SPI controller IO memory + * @info_size: number of SPI devices to 
be probed + * @info: SPI devices to be probed + */ +struct keba_spi_auxdev { + struct auxiliary_device auxdev; + struct resource io; + int info_size; + struct spi_board_info *info; +}; + +/** + * struct keba_fan_auxdev - KEBA fan auxiliary device + * @auxdev: auxiliary device object + * @io: address range of fan controller IO memory + */ +struct keba_fan_auxdev { + struct auxiliary_device auxdev; + struct resource io; +}; + +/** + * struct keba_batt_auxdev - KEBA battery auxiliary device + * @auxdev: auxiliary device object + * @io: address range of battery controller IO memory + */ +struct keba_batt_auxdev { + struct auxiliary_device auxdev; + struct resource io; +}; + +/** + * struct keba_uart_auxdev - KEBA UART auxiliary device + * @auxdev: auxiliary device object + * @io: address range of UART controller IO memory + * @irq: number of UART controller interrupt + */ +struct keba_uart_auxdev { + struct auxiliary_device auxdev; + struct resource io; + unsigned int irq; +}; + #endif /* _LINUX_MISC_KEBA_H */ diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index e23c692a34c7..fc7e6153b73d 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -45,7 +45,6 @@ #include <linux/workqueue.h> #include <linux/mempool.h> #include <linux/interrupt.h> -#include <linux/idr.h> #include <linux/notifier.h> #include <linux/refcount.h> #include <linux/auxiliary_bus.h> @@ -474,36 +473,6 @@ struct mlx5_core_sriov { u16 max_ec_vfs; }; -struct mlx5_fc_pool { - struct mlx5_core_dev *dev; - struct mutex pool_lock; /* protects pool lists */ - struct list_head fully_used; - struct list_head partially_used; - struct list_head unused; - int available_fcs; - int used_fcs; - int threshold; -}; - -struct mlx5_fc_stats { - spinlock_t counters_idr_lock; /* protects counters_idr */ - struct idr counters_idr; - struct list_head counters; - struct llist_head addlist; - struct llist_head dellist; - - struct workqueue_struct *wq; - struct delayed_work work; - unsigned long next_query; - unsigned long sampling_interval; /* jiffies */ - u32 *bulk_query_out; - int bulk_query_len; - size_t num_counters; - bool bulk_query_alloc_failed; - unsigned long next_bulk_query_alloc; - struct mlx5_fc_pool fc_pool; -}; - struct mlx5_events; struct mlx5_mpfs; struct mlx5_eswitch; @@ -630,7 +599,7 @@ struct mlx5_priv { struct mlx5_devcom_comp_dev *hca_devcom_comp; struct mlx5_fw_reset *fw_reset; struct mlx5_core_roce roce; - struct mlx5_fc_stats fc_stats; + struct mlx5_fc_stats *fc_stats; struct mlx5_rl_table rl_table; struct mlx5_ft_pool *ft_pool; diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b744e554f014..438db888bde0 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -298,9 +298,6 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging); -/* As mlx5_fc_create() but doesn't queue stats refresh thread. 
*/ -struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging); - void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter); u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter); void mlx5_fc_query_cached(struct mlx5_fc *counter, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 96d369112bfa..4fbbcf35498b 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1872,7 +1872,11 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_328[0x2]; u8 relaxed_ordering_read[0x1]; u8 log_max_pd[0x5]; - u8 reserved_at_330[0x5]; + u8 dp_ordering_ooo_all_ud[0x1]; + u8 dp_ordering_ooo_all_uc[0x1]; + u8 dp_ordering_ooo_all_xrc[0x1]; + u8 dp_ordering_ooo_all_dc[0x1]; + u8 dp_ordering_ooo_all_rc[0x1]; u8 pcie_reset_using_hotreset_method[0x1]; u8 pci_sync_for_fw_update_with_driver_unload[0x1]; u8 vnic_env_cnt_steering_fail[0x1]; @@ -2094,7 +2098,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_0[0x80]; u8 migratable[0x1]; - u8 reserved_at_81[0x11]; + u8 reserved_at_81[0x7]; + u8 dp_ordering_force[0x1]; + u8 reserved_at_89[0x9]; u8 query_vuid[0x1]; u8 reserved_at_93[0x5]; u8 umr_log_entity_size_5[0x1]; @@ -3524,7 +3530,8 @@ struct mlx5_ifc_qpc_bits { u8 latency_sensitive[0x1]; u8 reserved_at_24[0x1]; u8 drain_sigerr[0x1]; - u8 reserved_at_26[0x2]; + u8 reserved_at_26[0x1]; + u8 dp_ordering_force[0x1]; u8 pd[0x18]; u8 mtu[0x3]; @@ -3597,7 +3604,8 @@ struct mlx5_ifc_qpc_bits { u8 rae[0x1]; u8 reserved_at_493[0x1]; u8 page_offset[0x6]; - u8 reserved_at_49a[0x3]; + u8 reserved_at_49a[0x2]; + u8 dp_ordering_1[0x1]; u8 cd_slave_receive[0x1]; u8 cd_slave_send[0x1]; u8 cd_master[0x1]; @@ -4105,11 +4113,47 @@ enum { ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4, }; +enum { + TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, + TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, + TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, +}; + +enum { + TSAR_TYPE_CAP_MASK_DWRR = 1 << 0, + TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1, + TSAR_TYPE_CAP_MASK_ETS = 1 << 2, +}; + +struct mlx5_ifc_tsar_element_bits { + u8 reserved_at_0[0x8]; + u8 tsar_type[0x8]; + u8 reserved_at_10[0x10]; +}; + +struct mlx5_ifc_vport_element_bits { + u8 reserved_at_0[0x10]; + u8 vport_number[0x10]; +}; + +struct mlx5_ifc_vport_tc_element_bits { + u8 traffic_class[0x4]; + u8 reserved_at_4[0xc]; + u8 vport_number[0x10]; +}; + +union mlx5_ifc_element_attributes_bits { + struct mlx5_ifc_tsar_element_bits tsar; + struct mlx5_ifc_vport_element_bits vport; + struct mlx5_ifc_vport_tc_element_bits vport_tc; + u8 reserved_at_0[0x20]; +}; + struct mlx5_ifc_scheduling_context_bits { u8 element_type[0x8]; u8 reserved_at_8[0x18]; - u8 element_attributes[0x20]; + union mlx5_ifc_element_attributes_bits element_attributes; u8 parent_element_id[0x20]; @@ -4507,7 +4551,8 @@ struct mlx5_ifc_dctc_bits { u8 state[0x4]; u8 reserved_at_8[0x18]; - u8 reserved_at_20[0x8]; + u8 reserved_at_20[0x7]; + u8 dp_ordering_force[0x1]; u8 user_index[0x18]; u8 reserved_at_40[0x8]; @@ -4522,7 +4567,9 @@ struct mlx5_ifc_dctc_bits { u8 latency_sensitive[0x1]; u8 rlky[0x1]; u8 free_ar[0x1]; - u8 reserved_at_73[0xd]; + u8 reserved_at_73[0x1]; + u8 dp_ordering_1[0x1]; + u8 reserved_at_75[0xb]; u8 reserved_at_80[0x8]; u8 cs_res[0x8]; @@ -4798,35 +4845,6 @@ struct mlx5_ifc_register_loopback_control_bits { u8 reserved_at_20[0x60]; }; -struct mlx5_ifc_vport_tc_element_bits { - u8 traffic_class[0x4]; - u8 reserved_at_4[0xc]; - u8 vport_number[0x10]; -}; - -struct mlx5_ifc_vport_element_bits { - u8 reserved_at_0[0x10]; - u8 vport_number[0x10]; -}; - -enum { 
- TSAR_ELEMENT_TSAR_TYPE_DWRR = 0x0, - TSAR_ELEMENT_TSAR_TYPE_ROUND_ROBIN = 0x1, - TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, -}; - -enum { - TSAR_TYPE_CAP_MASK_DWRR = 1 << 0, - TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1, - TSAR_TYPE_CAP_MASK_ETS = 1 << 2, -}; - -struct mlx5_ifc_tsar_element_bits { - u8 reserved_at_0[0x8]; - u8 tsar_type[0x8]; - u8 reserved_at_10[0x10]; -}; - enum { MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_SUCCESS = 0x0, MLX5_TEARDOWN_HCA_OUT_FORCE_STATE_FAIL = 0x1, diff --git a/include/linux/mm.h b/include/linux/mm.h index 61fff5d34ed5..c39c4945946c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -97,11 +97,11 @@ extern const int mmap_rnd_compat_bits_max; extern int mmap_rnd_compat_bits __read_mostly; #endif -#ifndef PHYSMEM_END +#ifndef DIRECT_MAP_PHYSMEM_END # ifdef MAX_PHYSMEM_BITS -# define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1) +# define DIRECT_MAP_PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1) # else -# define PHYSMEM_END (((phys_addr_t)-1)&~(1ULL<<63)) +# define DIRECT_MAP_PHYSMEM_END (((phys_addr_t)-1)&~(1ULL<<63)) # endif #endif @@ -329,12 +329,14 @@ extern unsigned int kobjsize(const void *objp); #define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */ +#define VM_HIGH_ARCH_BIT_6 38 /* bit only usable on 64-bit architectures */ #define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0) #define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1) #define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2) #define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3) #define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4) #define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5) +#define VM_HIGH_ARCH_6 BIT(VM_HIGH_ARCH_BIT_6) #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ #ifdef CONFIG_ARCH_HAS_PKEYS @@ -365,7 +367,17 @@ extern unsigned int kobjsize(const void *objp); * for more details on the guard size. */ # define VM_SHADOW_STACK VM_HIGH_ARCH_5 -#else +#endif + +#if defined(CONFIG_ARM64_GCS) +/* + * arm64's Guarded Control Stack implements similar functionality and + * has similar constraints to shadow stacks. + */ +# define VM_SHADOW_STACK VM_HIGH_ARCH_6 +#endif + +#ifndef VM_SHADOW_STACK # define VM_SHADOW_STACK VM_NONE #endif @@ -1286,8 +1298,6 @@ static inline struct folio *virt_to_folio(const void *x) void __folio_put(struct folio *folio); -void put_pages_list(struct list_head *pages); - void split_page(struct page *page, unsigned int order); void folio_copy(struct folio *dst, struct folio *src); int folio_mc_copy(struct folio *dst, struct folio *src); @@ -1895,7 +1905,7 @@ static inline unsigned long page_to_section(const struct page *page) * * Return: The Page Frame Number of the first page in the folio. 
*/ -static inline unsigned long folio_pfn(struct folio *folio) +static inline unsigned long folio_pfn(const struct folio *folio) { return page_to_pfn(&folio->page); } @@ -2524,6 +2534,7 @@ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end, struct folio **folios, unsigned int max_folios, pgoff_t *offset); +int folio_add_pins(struct folio *folio, unsigned int pins); int get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages); @@ -2539,6 +2550,7 @@ struct kvec; struct page *get_dump_page(unsigned long addr); bool folio_mark_dirty(struct folio *folio); +bool folio_mark_dirty_lock(struct folio *folio); bool set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); @@ -3015,8 +3027,11 @@ static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, return pte; } -pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, - unsigned long addr, spinlock_t **ptlp); +pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp); +pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, pmd_t *pmdvalp, + spinlock_t **ptlp); #define pte_unmap_unlock(pte, ptl) do { \ spin_unlock(ptl); \ @@ -3818,9 +3833,6 @@ void *sparse_buffer_alloc(unsigned long size); struct page * __populate_section_memmap(unsigned long pfn, unsigned long nr_pages, int nid, struct vmem_altmap *altmap, struct dev_pagemap *pgmap); -void pud_init(void *addr); -void pmd_init(void *addr); -void kernel_pte_init(void *addr); pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); @@ -4163,61 +4175,8 @@ static inline int do_mseal(unsigned long start, size_t len_in, unsigned long fla } #endif -#ifdef CONFIG_MEM_ALLOC_PROFILING -static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) -{ - int i; - struct alloc_tag *tag; - unsigned int nr_pages = 1 << new_order; - - if (!mem_alloc_profiling_enabled()) - return; - - tag = pgalloc_tag_get(&folio->page); - if (!tag) - return; - - for (i = nr_pages; i < (1 << old_order); i += nr_pages) { - union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i)); - - if (ref) { - /* Set new reference to point to the original tag */ - alloc_tag_ref_set(ref, tag); - put_page_tag_ref(ref); - } - } -} - -static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) -{ - struct alloc_tag *tag; - union codetag_ref *ref; - - tag = pgalloc_tag_get(&old->page); - if (!tag) - return; - - ref = get_page_tag_ref(&new->page); - if (!ref) - return; - - /* Clear the old ref to the original allocation tag. */ - clear_page_tag_ref(&old->page); - /* Decrement the counters of the tag on get_new_folio. 
*/ - alloc_tag_sub(ref, folio_nr_pages(new)); - - __alloc_tag_ref_set(ref, tag); - - put_page_tag_ref(ref); -} -#else /* !CONFIG_MEM_ALLOC_PROFILING */ -static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) -{ -} - -static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) -{ -} -#endif /* CONFIG_MEM_ALLOC_PROFILING */ +int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status); +int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status); +int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status); #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index f4fe593c1400..1b6a917fffa4 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -155,6 +155,11 @@ static inline int folio_lru_refs(struct folio *folio) return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset; } +static inline void folio_clear_lru_refs(struct folio *folio) +{ + set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); +} + static inline int folio_lru_gen(struct folio *folio) { unsigned long flags = READ_ONCE(folio->flags); @@ -222,6 +227,7 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, { unsigned long seq; unsigned long flags; + unsigned long mask; int gen = folio_lru_gen(folio); int type = folio_is_file_lru(folio); int zone = folio_zonenum(folio); @@ -257,7 +263,14 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, gen = lru_gen_from_seq(seq); flags = (gen + 1UL) << LRU_GEN_PGOFF; /* see the comment on MIN_NR_GENS about PG_active */ - set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags); + mask = LRU_GEN_MASK; + /* + * Don't clear PG_workingset here because it can affect PSI accounting + * if the activation is due to workingset refault. + */ + if (folio_test_active(folio)) + mask |= LRU_REFS_MASK | BIT(PG_referenced) | BIT(PG_active); + set_mask_bits(&folio->flags, mask, flags); lru_gen_update_size(lruvec, folio, -1, gen); /* for folio_rotate_reclaimable() */ @@ -291,6 +304,12 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, return true; } +static inline void folio_migrate_refs(struct folio *new, struct folio *old) +{ + unsigned long refs = READ_ONCE(old->flags) & LRU_REFS_MASK; + + set_mask_bits(&new->flags, LRU_REFS_MASK, refs); +} #else /* !CONFIG_LRU_GEN */ static inline bool lru_gen_enabled(void) @@ -313,6 +332,10 @@ static inline bool lru_gen_del_folio(struct lruvec *lruvec, struct folio *folio, return false; } +static inline void folio_migrate_refs(struct folio *new, struct folio *old) +{ + +} #endif /* CONFIG_LRU_GEN */ static __always_inline @@ -521,7 +544,7 @@ static inline pte_marker copy_pte_marker( { pte_marker srcm = pte_marker_get(entry); /* Always copy error entries. */ - pte_marker dstm = srcm & PTE_MARKER_POISONED; + pte_marker dstm = srcm & (PTE_MARKER_POISONED | PTE_MARKER_GUARD); /* Only copy PTE markers if UFFD register matches. 
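 * (i.e. only when the destination VMA is also registered for userfaultfd
 * write-protect; the poisoned and guard markers above are copied
 * unconditionally)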
*/ if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma)) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6e3bdf8e38bc..7361a8f3ab68 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -521,9 +521,6 @@ static_assert(sizeof(struct ptdesc) <= sizeof(struct page)); */ #define STRUCT_PAGE_MAX_SHIFT (order_base_2(sizeof(struct page))) -#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) -#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) - /* * page_private can be used on tail pages. However, PagePrivate is only * checked by the VM on the head page. So page_private on the tail pages @@ -542,21 +539,6 @@ static inline void *folio_get_private(struct folio *folio) return folio->private; } -struct page_frag_cache { - void * va; -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) - __u16 offset; - __u16 size; -#else - __u32 offset; -#endif - /* we maintain a pagecount bias, so that we dont dirty cache line - * containing page->_refcount every time we allocate a fragment. - */ - unsigned int pagecnt_bias; - bool pfmemalloc; -}; - typedef unsigned long vm_flags_t; /* @@ -782,6 +764,7 @@ struct vm_area_struct { struct mm_cid { u64 time; int cid; + int recent_cid; }; #endif @@ -852,6 +835,27 @@ struct mm_struct { * When the next mm_cid scan is due (in jiffies). */ unsigned long mm_cid_next_scan; + /** + * @nr_cpus_allowed: Number of CPUs allowed for mm. + * + * Number of CPUs allowed in the union of all mm's + * threads allowed CPUs. + */ + unsigned int nr_cpus_allowed; + /** + * @max_nr_cid: Maximum number of concurrency IDs allocated. + * + * Track the highest number of concurrency IDs allocated for the + * mm. + */ + atomic_t max_nr_cid; + /** + * @cpus_allowed_lock: Lock protecting mm cpus_allowed. + * + * Provide mutual exclusion for mm cpus_allowed and + * mm nr_cpus_allowed updates. + */ + raw_spinlock_t cpus_allowed_lock; #endif #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* size of all page tables */ @@ -1170,18 +1174,30 @@ static inline int mm_cid_clear_lazy_put(int cid) return cid & ~MM_CID_LAZY_PUT; } +/* + * mm_cpus_allowed: Union of all mm's threads allowed CPUs. + */ +static inline cpumask_t *mm_cpus_allowed(struct mm_struct *mm) +{ + unsigned long bitmap = (unsigned long)mm; + + bitmap += offsetof(struct mm_struct, cpu_bitmap); + /* Skip cpu_bitmap */ + bitmap += cpumask_size(); + return (struct cpumask *)bitmap; +} + /* Accessor for struct mm_struct's cidmask. 
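 * The per-mm CID data is laid out right after cpu_bitmap: first the
 * mm_cpus_allowed() mask, then the cidmask, one cpumask_size() each,
 * which is why mm_cid_size() reserves 2 * cpumask_size().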
*/ static inline cpumask_t *mm_cidmask(struct mm_struct *mm) { - unsigned long cid_bitmap = (unsigned long)mm; + unsigned long cid_bitmap = (unsigned long)mm_cpus_allowed(mm); - cid_bitmap += offsetof(struct mm_struct, cpu_bitmap); - /* Skip cpu_bitmap */ + /* Skip mm_cpus_allowed */ cid_bitmap += cpumask_size(); return (struct cpumask *)cid_bitmap; } -static inline void mm_init_cid(struct mm_struct *mm) +static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { int i; @@ -1189,17 +1205,22 @@ static inline void mm_init_cid(struct mm_struct *mm) struct mm_cid *pcpu_cid = per_cpu_ptr(mm->pcpu_cid, i); pcpu_cid->cid = MM_CID_UNSET; + pcpu_cid->recent_cid = MM_CID_UNSET; pcpu_cid->time = 0; } + mm->nr_cpus_allowed = p->nr_cpus_allowed; + atomic_set(&mm->max_nr_cid, 0); + raw_spin_lock_init(&mm->cpus_allowed_lock); + cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask); cpumask_clear(mm_cidmask(mm)); } -static inline int mm_alloc_cid_noprof(struct mm_struct *mm) +static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p) { mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid); if (!mm->pcpu_cid) return -ENOMEM; - mm_init_cid(mm); + mm_init_cid(mm, p); return 0; } #define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__)) @@ -1212,16 +1233,31 @@ static inline void mm_destroy_cid(struct mm_struct *mm) static inline unsigned int mm_cid_size(void) { - return cpumask_size(); + return 2 * cpumask_size(); /* mm_cpus_allowed(), mm_cidmask(). */ +} + +static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) +{ + struct cpumask *mm_allowed = mm_cpus_allowed(mm); + + if (!mm) + return; + /* The mm_cpus_allowed is the union of each thread allowed CPUs masks. */ + raw_spin_lock(&mm->cpus_allowed_lock); + cpumask_or(mm_allowed, mm_allowed, cpumask); + WRITE_ONCE(mm->nr_cpus_allowed, cpumask_weight(mm_allowed)); + raw_spin_unlock(&mm->cpus_allowed_lock); } #else /* CONFIG_SCHED_MM_CID */ -static inline void mm_init_cid(struct mm_struct *mm) { } -static inline int mm_alloc_cid(struct mm_struct *mm) { return 0; } +static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) { } +static inline int mm_alloc_cid(struct mm_struct *mm, struct task_struct *p) { return 0; } static inline void mm_destroy_cid(struct mm_struct *mm) { } + static inline unsigned int mm_cid_size(void) { return 0; } +static inline void mm_set_cpus_allowed(struct mm_struct *mm, const struct cpumask *cpumask) { } #endif /* CONFIG_SCHED_MM_CID */ struct mmu_gather; @@ -1499,4 +1535,88 @@ enum { /* See also internal only FOLL flags in mm/internal.h */ }; +/* mm flags */ + +/* + * The first two bits represent core dump modes for set-user-ID, + * the modes are SUID_DUMP_* defined in linux/sched/coredump.h + */ +#define MMF_DUMPABLE_BITS 2 +#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) +/* coredump filter bits */ +#define MMF_DUMP_ANON_PRIVATE 2 +#define MMF_DUMP_ANON_SHARED 3 +#define MMF_DUMP_MAPPED_PRIVATE 4 +#define MMF_DUMP_MAPPED_SHARED 5 +#define MMF_DUMP_ELF_HEADERS 6 +#define MMF_DUMP_HUGETLB_PRIVATE 7 +#define MMF_DUMP_HUGETLB_SHARED 8 +#define MMF_DUMP_DAX_PRIVATE 9 +#define MMF_DUMP_DAX_SHARED 10 + +#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS +#define MMF_DUMP_FILTER_BITS 9 +#define MMF_DUMP_FILTER_MASK \ + (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) +#define MMF_DUMP_FILTER_DEFAULT \ + ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ + (1 << MMF_DUMP_HUGETLB_PRIVATE) | 
MMF_DUMP_MASK_DEFAULT_ELF) + +#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS +# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) +#else +# define MMF_DUMP_MASK_DEFAULT_ELF 0 +#endif + /* leave room for more dump flags */ +#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ +#define MMF_VM_HUGEPAGE 17 /* set when mm is available for khugepaged */ + +/* + * This one-shot flag is dropped due to the necessity of changing the exe + * once again on NFS restore + */ +//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ + +#define MMF_HAS_UPROBES 19 /* has uprobes */ +#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ +#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ +#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ +#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ +#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ +#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) +#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */ +#define MMF_MULTIPROCESS 26 /* mm is shared between processes */ +/* + * MMF_HAS_PINNED: Whether this mm has pinned any pages. This can be either + * replaced in the future by mm.pinned_vm when it becomes stable, or grow into + * a counter on its own. We're aggressive on this bit for now: even if the + * pinned pages were unpinned later on, we'll still keep this bit set for the + * lifecycle of this mm, just for simplicity. + */ +#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */ + +#define MMF_HAS_MDWE 28 +#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE) + + +#define MMF_HAS_MDWE_NO_INHERIT 29 + +#define MMF_VM_MERGE_ANY 30 +#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY) + +#define MMF_TOPDOWN 31 /* mm searches top down by default */ +#define MMF_TOPDOWN_MASK (1 << MMF_TOPDOWN) + +#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ + MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\ + MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK) + +static inline unsigned long mmf_init_flags(unsigned long flags) +{ + if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT)) + flags &= ~((1UL << MMF_HAS_MDWE) | + (1UL << MMF_HAS_MDWE_NO_INHERIT)); + return flags & MMF_INIT_MASK; +} + #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index bff5706b76e1..a82aa80c0ba4 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -8,6 +8,7 @@ * (These are defined separately to decouple sched.h from mm_types.h as much as possible.) */ +#include <linux/align.h> #include <linux/types.h> #include <asm/page.h> @@ -43,6 +44,26 @@ struct page_frag { #endif }; +#define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) +#define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) +struct page_frag_cache { + /* encoded_page consists of the virtual address, pfmemalloc bit and + * order of a page. + */ + unsigned long encoded_page; + + /* we maintain a pagecount bias, so that we don't dirty cache line + * containing page->_refcount every time we allocate a fragment. 
+ */ +#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) && (BITS_PER_LONG <= 32) + __u16 offset; + __u16 pagecnt_bias; +#else + __u32 offset; + __u32 pagecnt_bias; +#endif +}; + /* Track pages that require TLB flushes */ struct tlbflush_unmap_batch { #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index f34407cc2788..526fce581657 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -35,7 +35,7 @@ struct mmc_csd { unsigned int wp_grp_size; unsigned int read_blkbits; unsigned int write_blkbits; - unsigned int capacity; + sector_t capacity; unsigned int read_partial:1, read_misalign:1, write_partial:1, @@ -188,6 +188,12 @@ struct sd_switch_caps { #define SD_MAX_CURRENT_400 (1 << SD_SET_CURRENT_LIMIT_400) #define SD_MAX_CURRENT_600 (1 << SD_SET_CURRENT_LIMIT_600) #define SD_MAX_CURRENT_800 (1 << SD_SET_CURRENT_LIMIT_800) + +#define SD4_SET_POWER_LIMIT_0_72W 0 +#define SD4_SET_POWER_LIMIT_1_44W 1 +#define SD4_SET_POWER_LIMIT_2_16W 2 +#define SD4_SET_POWER_LIMIT_2_88W 3 +#define SD4_SET_POWER_LIMIT_1_80W 4 }; struct sd_ext_reg { @@ -209,6 +215,34 @@ struct sd_ext_reg { #define SD_EXT_PERF_CMD_QUEUE (1<<4) }; +struct sd_uhs2_config { + u32 node_id; + + u32 n_fcu; + u32 maxblk_len; + u8 n_lanes; + u8 dadr_len; + u8 app_type; + u8 phy_minor_rev; + u8 phy_major_rev; + u8 can_hibernate; + u8 n_lss_sync; + u8 n_lss_dir; + u8 link_minor_rev; + u8 link_major_rev; + u8 dev_type; + u8 n_data_gap; + + u32 n_fcu_set; + u32 maxblk_len_set; + u8 n_lanes_set; + u8 speed_range_set; + u8 n_lss_sync_set; + u8 n_lss_dir_set; + u8 n_data_gap_set; + u8 max_retry_set; +}; + struct sdio_cccr { unsigned int sdio_vsn; unsigned int sd_vsn; @@ -294,6 +328,7 @@ struct mmc_card { #define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */ #define MMC_QUIRK_BROKEN_SD_CACHE (1<<15) /* Disable broken SD cache support */ #define MMC_QUIRK_BROKEN_CACHE_FLUSH (1<<16) /* Don't flush cache until the write has occurred */ +#define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY (1<<17) /* Disable broken SD poweroff notify support */ bool written_flag; /* Indicates eMMC has been written since power on */ bool reenable_cmdq; /* Re-enable Command Queue */ @@ -319,6 +354,8 @@ struct mmc_card { struct sd_ext_reg ext_power; /* SD extension reg for PM */ struct sd_ext_reg ext_perf; /* SD extension reg for PERF */ + struct sd_uhs2_config uhs2_config; /* SD UHS-II config */ + unsigned int sdio_funcs; /* number of SDIO functions */ atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */ struct sdio_cccr cccr; /* common card info */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index f0ac2e469b32..56972bd78462 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -11,6 +11,20 @@ struct mmc_data; struct mmc_request; +#define UHS2_MAX_PAYLOAD_LEN 2 +#define UHS2_MAX_RESP_LEN 20 + +struct uhs2_command { + u16 header; + u16 arg; + __be32 payload[UHS2_MAX_PAYLOAD_LEN]; + u8 payload_len; + u8 packet_len; + u8 tmode_half_duplex; + u8 uhs2_resp[UHS2_MAX_RESP_LEN]; /* UHS2 native cmd resp */ + u8 uhs2_resp_len; /* UHS2 native cmd resp len */ +}; + struct mmc_command { u32 opcode; u32 arg; @@ -96,6 +110,12 @@ struct mmc_command { unsigned int busy_timeout; /* busy detect timeout in ms */ struct mmc_data *data; /* data segment associated with cmd */ struct mmc_request *mrq; /* associated request */ + + struct uhs2_command *uhs2_cmd; /* UHS2 command */ + + /* for SDUC */ + bool has_ext_addr; + u8 ext_addr; }; struct mmc_data 
{ @@ -154,6 +174,7 @@ struct mmc_request { const struct bio_crypt_ctx *crypto_ctx; int crypto_key_slot; #endif + struct uhs2_command uhs2_cmd; }; struct mmc_card; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 8fc2b328ec4d..f166d6611ddb 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -17,6 +17,7 @@ #include <linux/mmc/pm.h> #include <linux/dma-direction.h> #include <linux/blk-crypto-profile.h> +#include <linux/mmc/sd_uhs2.h> struct mmc_ios { unsigned int clock; /* clock rate */ @@ -64,6 +65,10 @@ struct mmc_ios { #define MMC_TIMING_MMC_HS400 10 #define MMC_TIMING_SD_EXP 11 #define MMC_TIMING_SD_EXP_1_2V 12 +#define MMC_TIMING_UHS2_SPEED_A 13 +#define MMC_TIMING_UHS2_SPEED_A_HD 14 +#define MMC_TIMING_UHS2_SPEED_B 15 +#define MMC_TIMING_UHS2_SPEED_B_HD 16 unsigned char signal_voltage; /* signalling voltage (1.8V or 3.3V) */ @@ -71,6 +76,9 @@ struct mmc_ios { #define MMC_SIGNAL_VOLTAGE_180 1 #define MMC_SIGNAL_VOLTAGE_120 2 + unsigned char vqmmc2_voltage; +#define MMC_VQMMC2_VOLTAGE_180 0 + unsigned char drv_type; /* driver type (A, B, C, D) */ #define MMC_SET_DRIVER_TYPE_B 0 @@ -92,6 +100,43 @@ struct mmc_clk_phase_map { struct mmc_clk_phase phase[MMC_NUM_CLK_PHASES]; }; +struct sd_uhs2_caps { + u32 dap; + u32 gap; + u32 group_desc; + u32 maxblk_len; + u32 n_fcu; + u8 n_lanes; + u8 addr64; + u8 card_type; + u8 phy_rev; + u8 speed_range; + u8 n_lss_sync; + u8 n_lss_dir; + u8 link_rev; + u8 host_type; + u8 n_data_gap; + + u32 maxblk_len_set; + u32 n_fcu_set; + u8 n_lanes_set; + u8 n_lss_sync_set; + u8 n_lss_dir_set; + u8 n_data_gap_set; + u8 max_retry_set; +}; + +enum sd_uhs2_operation { + UHS2_PHY_INIT = 0, + UHS2_SET_CONFIG, + UHS2_ENABLE_INT, + UHS2_DISABLE_INT, + UHS2_ENABLE_CLK, + UHS2_DISABLE_CLK, + UHS2_CHECK_DORMANT, + UHS2_SET_IOS, +}; + struct mmc_host; enum mmc_err_stat { @@ -219,6 +264,14 @@ struct mmc_host_ops { /* Initialize an SD express card, mandatory for MMC_CAP2_SD_EXP. */ int (*init_sd_express)(struct mmc_host *host, struct mmc_ios *ios); + + /* + * The uhs2_control callback is used to execute SD UHS-II specific + * operations. It's mandatory to implement for hosts that support the + * SD UHS-II interface (MMC_CAP2_SD_UHS2). Expected return values are a + * negative errno in case of a failure or zero for success. 
+ */ + int (*uhs2_control)(struct mmc_host *host, enum sd_uhs2_operation op); }; struct mmc_cqe_ops { @@ -288,6 +341,7 @@ struct mmc_pwrseq; struct mmc_supply { struct regulator *vmmc; /* Card power supply */ struct regulator *vqmmc; /* Optional Vccq supply */ + struct regulator *vqmmc2; /* Optional supply for phy */ }; struct mmc_ctx { @@ -379,6 +433,7 @@ struct mmc_host { MMC_CAP2_HS200_1_2V_SDR) #define MMC_CAP2_SD_EXP (1 << 7) /* SD express via PCIe */ #define MMC_CAP2_SD_EXP_1_2V (1 << 8) /* SD express 1.2V */ +#define MMC_CAP2_SD_UHS2 (1 << 9) /* SD UHS-II support */ #define MMC_CAP2_CD_ACTIVE_HIGH (1 << 10) /* Card-detect signal active high */ #define MMC_CAP2_RO_ACTIVE_HIGH (1 << 11) /* Write-protect signal active high */ #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14) /* Don't power up before scan */ @@ -405,6 +460,10 @@ struct mmc_host { #endif #define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */ + bool uhs2_sd_tran; /* UHS-II flag for SD_TRAN state */ + bool uhs2_app_cmd; /* UHS-II flag for APP command */ + struct sd_uhs2_caps uhs2_caps; /* Host UHS-II capabilities */ + int fixed_drv_type; /* fixed driver type for non-removable media */ mmc_pm_flag_t pm_caps; /* supported pm features */ @@ -567,6 +626,7 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, unsigned short vdd_bit); int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); +int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, struct mmc_ios *ios); #else static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, struct regulator *supply, @@ -580,6 +640,12 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, { return -EINVAL; } + +static inline int mmc_regulator_set_vqmmc2(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + return -EINVAL; +} #endif int mmc_regulator_get_supply(struct mmc_host *mmc); @@ -615,6 +681,14 @@ static inline int mmc_card_uhs(struct mmc_card *card) card->host->ios.timing <= MMC_TIMING_UHS_DDR50; } +static inline bool mmc_card_uhs2(struct mmc_host *host) +{ + return host->ios.timing == MMC_TIMING_UHS2_SPEED_A || + host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD || + host->ios.timing == MMC_TIMING_UHS2_SPEED_B || + host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD; +} + void mmc_retune_timer_stop(struct mmc_host *host); static inline void mmc_retune_needed(struct mmc_host *host) @@ -649,6 +723,12 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host, host->err_stats[stat] += 1; } +static inline int mmc_card_uhs2_hd_mode(struct mmc_host *host) +{ + return host->ios.timing == MMC_TIMING_UHS2_SPEED_A_HD || + host->ios.timing == MMC_TIMING_UHS2_SPEED_B_HD; +} + int mmc_sd_switch(struct mmc_card *card, bool mode, int group, u8 value, u8 *resp); int mmc_send_status(struct mmc_card *card, u32 *status); diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h index 6727576a8755..af5fc70e09a2 100644 --- a/include/linux/mmc/sd.h +++ b/include/linux/mmc/sd.h @@ -15,6 +15,9 @@ #define SD_SEND_IF_COND 8 /* bcr [11:0] See below R7 */ #define SD_SWITCH_VOLTAGE 11 /* ac R1 */ +/* Class 2 */ +#define SD_ADDR_EXT 22 /* ac [5:0] R1 */ + /* class 10 */ #define SD_SWITCH 6 /* adtc [31:0] See below R1 */ @@ -36,6 +39,7 @@ /* OCR bit definitions */ #define SD_OCR_S18R (1 << 24) /* 1.8V switching request */ #define SD_ROCR_S18A SD_OCR_S18R /* 1.8V switching accepted by card */ +#define SD_OCR_2T (1 << 27) /* HO2T/CO2T - SDUC support */ #define SD_OCR_XPC (1 << 28) /* SDXC power control */ #define 
SD_OCR_CCS (1 << 30) /* Card Capacity Status */ diff --git a/include/linux/mmc/sd_uhs2.h b/include/linux/mmc/sd_uhs2.h new file mode 100644 index 000000000000..7abe9bd870c7 --- /dev/null +++ b/include/linux/mmc/sd_uhs2.h @@ -0,0 +1,240 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Header file for UHS-II packets, Host Controller registers and I/O + * accessors. + * + * Copyright (C) 2014 Intel Corp, All Rights Reserved. + */ +#ifndef LINUX_MMC_UHS2_H +#define LINUX_MMC_UHS2_H + +/* LINK Layer definition */ +/* + * UHS2 Header: + * Refer to UHS-II Addendum Version 1.02 Figure 5-2, the format of CCMD Header is described below: + * bit [3:0] : DID(Destination ID = Node ID of UHS2 card) + * bit [6:4] : TYP(Packet Type) + * 000b: CCMD(Control command packet) + * 001b: DCMD(Data command packet) + * 010b: RES(Response packet) + * 011b: DATA(Data payload packet) + * 111b: MSG(Message packet) + * Others: Reserved + * bit [7] : NP(Native Packet) + * bit [10:8] : TID(Transaction ID) + * bit [11] : Reserved + * bit [15:12]: SID(Source ID 0: Node ID of Host) + * + * Broadcast CCMD issued by Host is represented as DID=SID=0. + */ +/* + * UHS2 Argument: + * Refer to UHS-II Addendum Version 1.02 Figure 6-5, the format of CCMD Argument is described below: + * bit [3:0] : MSB of IOADR + * bit [5:4] : PLEN(Payload Length) + * 00b: 0 byte + * 01b: 4 bytes + * 10b: 8 bytes + * 11b: 16 bytes + * bit [6] : Reserved + * bit [7] : R/W(Read/Write) + * 0: Control read command + * 1: Control write command + * bit [15:8] : LSB of IOADR + * + * I/O Address specifies the address of register in UHS-II I/O space accessed by CCMD. + * The unit of I/O Address is 4 Bytes. It is transmitted in MSB first, LSB last. + */ +#define UHS2_NATIVE_PACKET_POS 7 +#define UHS2_NATIVE_PACKET (1 << UHS2_NATIVE_PACKET_POS) + +#define UHS2_PACKET_TYPE_POS 4 +#define UHS2_PACKET_TYPE_CCMD (0 << UHS2_PACKET_TYPE_POS) +#define UHS2_PACKET_TYPE_DCMD (1 << UHS2_PACKET_TYPE_POS) +#define UHS2_PACKET_TYPE_RES (2 << UHS2_PACKET_TYPE_POS) +#define UHS2_PACKET_TYPE_DATA (3 << UHS2_PACKET_TYPE_POS) +#define UHS2_PACKET_TYPE_MSG (7 << UHS2_PACKET_TYPE_POS) + +#define UHS2_DEST_ID_MASK 0x0F +#define UHS2_DEST_ID 0x1 + +#define UHS2_SRC_ID_POS 12 +#define UHS2_SRC_ID_MASK 0xF000 + +#define UHS2_TRANS_ID_POS 8 +#define UHS2_TRANS_ID_MASK 0x0700 + +/* UHS2 MSG */ +#define UHS2_MSG_CTG_POS 5 +#define UHS2_MSG_CTG_LMSG 0x00 +#define UHS2_MSG_CTG_INT 0x60 +#define UHS2_MSG_CTG_AMSG 0x80 + +#define UHS2_MSG_CTG_FCREQ 0x00 +#define UHS2_MSG_CTG_FCRDY 0x01 +#define UHS2_MSG_CTG_STAT 0x02 + +#define UHS2_MSG_CODE_POS 8 +#define UHS2_MSG_CODE_FC_UNRECOVER_ERR 0x8 +#define UHS2_MSG_CODE_STAT_UNRECOVER_ERR 0x8 +#define UHS2_MSG_CODE_STAT_RECOVER_ERR 0x1 + +/* TRANS Layer definition */ + +/* Native packets*/ +#define UHS2_NATIVE_CMD_RW_POS 7 +#define UHS2_NATIVE_CMD_WRITE (1 << UHS2_NATIVE_CMD_RW_POS) +#define UHS2_NATIVE_CMD_READ (0 << UHS2_NATIVE_CMD_RW_POS) + +#define UHS2_NATIVE_CMD_PLEN_POS 4 +#define UHS2_NATIVE_CMD_PLEN_4B (1 << UHS2_NATIVE_CMD_PLEN_POS) +#define UHS2_NATIVE_CMD_PLEN_8B (2 << UHS2_NATIVE_CMD_PLEN_POS) +#define UHS2_NATIVE_CMD_PLEN_16B (3 << UHS2_NATIVE_CMD_PLEN_POS) + +#define UHS2_NATIVE_CCMD_GET_MIOADR_MASK 0xF00 +#define UHS2_NATIVE_CCMD_MIOADR_MASK 0x0F + +#define UHS2_NATIVE_CCMD_LIOADR_POS 8 +#define UHS2_NATIVE_CCMD_GET_LIOADR_MASK 0x0FF + +#define UHS2_CCMD_DEV_INIT_COMPLETE_FLAG BIT(11) +#define UHS2_DEV_INIT_PAYLOAD_LEN 1 +#define UHS2_DEV_INIT_RESP_LEN 6 +#define UHS2_DEV_ENUM_PAYLOAD_LEN 1 +#define 
UHS2_DEV_ENUM_RESP_LEN 8 +#define UHS2_CFG_WRITE_PAYLOAD_LEN 2 +#define UHS2_CFG_WRITE_PHY_SET_RESP_LEN 4 +#define UHS2_CFG_WRITE_GENERIC_SET_RESP_LEN 5 +#define UHS2_GO_DORMANT_PAYLOAD_LEN 1 + +/* + * UHS2 Argument: + * Refer to UHS-II Addendum Version 1.02 Figure 6-8, the format of DCMD Argument is described below: + * bit [2:0] : Reserved + * bit [6:3] : TMODE(Transfer Mode) + * bit 3: DAM(Data Access Mode) + * bit 4: TLUM(TLEN Unit Mode) + * bit 5: LM(Length Mode) + * bit 6: DM(Duplex Mode) + * bit [7] : R/W(Read/Write) + * 0: Control read command + * 1: Control write command + * bit [15:8] : Reserved + * + * I/O Address specifies the address of register in UHS-II I/O space accessed by CCMD. + * The unit of I/O Address is 4 Bytes. It is transmitted in MSB first, LSB last. + */ +#define UHS2_DCMD_DM_POS 6 +#define UHS2_DCMD_2L_HD_MODE (1 << UHS2_DCMD_DM_POS) +#define UHS2_DCMD_LM_POS 5 +#define UHS2_DCMD_LM_TLEN_EXIST (1 << UHS2_DCMD_LM_POS) +#define UHS2_DCMD_TLUM_POS 4 +#define UHS2_DCMD_TLUM_BYTE_MODE (1 << UHS2_DCMD_TLUM_POS) +#define UHS2_NATIVE_DCMD_DAM_POS 3 +#define UHS2_NATIVE_DCMD_DAM_IO (1 << UHS2_NATIVE_DCMD_DAM_POS) + +#define UHS2_RES_NACK_POS 7 +#define UHS2_RES_NACK_MASK (0x1 << UHS2_RES_NACK_POS) + +#define UHS2_RES_ECODE_POS 4 +#define UHS2_RES_ECODE_MASK 0x7 +#define UHS2_RES_ECODE_COND 1 +#define UHS2_RES_ECODE_ARG 2 +#define UHS2_RES_ECODE_GEN 3 + +/* IOADR of device registers */ +#define UHS2_IOADR_GENERIC_CAPS 0x00 +#define UHS2_IOADR_PHY_CAPS 0x02 +#define UHS2_IOADR_LINK_CAPS 0x04 +#define UHS2_IOADR_RSV_CAPS 0x06 +#define UHS2_IOADR_GENERIC_SETTINGS 0x08 +#define UHS2_IOADR_PHY_SETTINGS 0x0A +#define UHS2_IOADR_LINK_SETTINGS 0x0C +#define UHS2_IOADR_PRESET 0x40 + +/* SD application packets */ +#define UHS2_SD_CMD_INDEX_POS 8 + +#define UHS2_SD_CMD_APP_POS 14 +#define UHS2_SD_CMD_APP (1 << UHS2_SD_CMD_APP_POS) + +/* UHS-II Device Registers */ +#define UHS2_DEV_CONFIG_REG 0x000 + +/* General Caps and Settings registers */ +#define UHS2_DEV_CONFIG_GEN_CAPS (UHS2_DEV_CONFIG_REG + 0x000) +#define UHS2_DEV_CONFIG_N_LANES_POS 8 +#define UHS2_DEV_CONFIG_N_LANES_MASK 0x3F +#define UHS2_DEV_CONFIG_2L_HD_FD 0x1 +#define UHS2_DEV_CONFIG_2D1U_FD 0x2 +#define UHS2_DEV_CONFIG_1D2U_FD 0x4 +#define UHS2_DEV_CONFIG_2D2U_FD 0x8 +#define UHS2_DEV_CONFIG_DADR_POS 14 +#define UHS2_DEV_CONFIG_DADR_MASK 0x1 +#define UHS2_DEV_CONFIG_APP_POS 16 +#define UHS2_DEV_CONFIG_APP_MASK 0xFF +#define UHS2_DEV_CONFIG_APP_SD_MEM 0x1 + +#define UHS2_DEV_CONFIG_GEN_SET (UHS2_DEV_CONFIG_REG + 0x008) +#define UHS2_DEV_CONFIG_GEN_SET_N_LANES_POS 8 +#define UHS2_DEV_CONFIG_GEN_SET_2L_FD_HD 0x0 +#define UHS2_DEV_CONFIG_GEN_SET_2D1U_FD 0x2 +#define UHS2_DEV_CONFIG_GEN_SET_1D2U_FD 0x3 +#define UHS2_DEV_CONFIG_GEN_SET_2D2U_FD 0x4 +#define UHS2_DEV_CONFIG_GEN_SET_CFG_COMPLETE BIT(31) + +/* PHY Caps and Settings registers */ +#define UHS2_DEV_CONFIG_PHY_CAPS (UHS2_DEV_CONFIG_REG + 0x002) +#define UHS2_DEV_CONFIG_PHY_MINOR_MASK 0xF +#define UHS2_DEV_CONFIG_PHY_MAJOR_POS 4 +#define UHS2_DEV_CONFIG_PHY_MAJOR_MASK 0x3 +#define UHS2_DEV_CONFIG_CAN_HIBER_POS 15 +#define UHS2_DEV_CONFIG_CAN_HIBER_MASK 0x1 +#define UHS2_DEV_CONFIG_PHY_CAPS1 (UHS2_DEV_CONFIG_REG + 0x003) +#define UHS2_DEV_CONFIG_N_LSS_SYN_MASK 0xF +#define UHS2_DEV_CONFIG_N_LSS_DIR_POS 4 +#define UHS2_DEV_CONFIG_N_LSS_DIR_MASK 0xF + +#define UHS2_DEV_CONFIG_PHY_SET (UHS2_DEV_CONFIG_REG + 0x00A) +#define UHS2_DEV_CONFIG_PHY_SET_SPEED_POS 6 +#define UHS2_DEV_CONFIG_PHY_SET_SPEED_A 0x0 +#define UHS2_DEV_CONFIG_PHY_SET_SPEED_B 0x1 + +/* LINK-TRAN Caps and 
Settings registers */ +#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS (UHS2_DEV_CONFIG_REG + 0x004) +#define UHS2_DEV_CONFIG_LT_MINOR_MASK 0xF +#define UHS2_DEV_CONFIG_LT_MAJOR_POS 4 +#define UHS2_DEV_CONFIG_LT_MAJOR_MASK 0x3 +#define UHS2_DEV_CONFIG_N_FCU_POS 8 +#define UHS2_DEV_CONFIG_N_FCU_MASK 0xFF +#define UHS2_DEV_CONFIG_DEV_TYPE_POS 16 +#define UHS2_DEV_CONFIG_DEV_TYPE_MASK 0x7 +#define UHS2_DEV_CONFIG_MAX_BLK_LEN_POS 20 +#define UHS2_DEV_CONFIG_MAX_BLK_LEN_MASK 0xFFF +#define UHS2_DEV_CONFIG_LINK_TRAN_CAPS1 (UHS2_DEV_CONFIG_REG + 0x005) +#define UHS2_DEV_CONFIG_N_DATA_GAP_MASK 0xFF + +#define UHS2_DEV_CONFIG_LINK_TRAN_SET (UHS2_DEV_CONFIG_REG + 0x00C) +#define UHS2_DEV_CONFIG_LT_SET_MAX_BLK_LEN 0x200 +#define UHS2_DEV_CONFIG_LT_SET_MAX_RETRY_POS 16 + +/* Preset register */ +#define UHS2_DEV_CONFIG_PRESET (UHS2_DEV_CONFIG_REG + 0x040) + +#define UHS2_DEV_INT_REG 0x100 + +#define UHS2_DEV_STATUS_REG 0x180 + +#define UHS2_DEV_CMD_REG 0x200 +#define UHS2_DEV_CMD_FULL_RESET (UHS2_DEV_CMD_REG + 0x000) +#define UHS2_DEV_CMD_GO_DORMANT_STATE (UHS2_DEV_CMD_REG + 0x001) +#define UHS2_DEV_CMD_DORMANT_HIBER BIT(7) +#define UHS2_DEV_CMD_DEVICE_INIT (UHS2_DEV_CMD_REG + 0x002) +#define UHS2_DEV_INIT_COMPLETE_FLAG BIT(11) +#define UHS2_DEV_CMD_ENUMERATE (UHS2_DEV_CMD_REG + 0x003) +#define UHS2_DEV_CMD_TRANS_ABORT (UHS2_DEV_CMD_REG + 0x004) + +#define UHS2_RCLK_MAX 52000000 +#define UHS2_RCLK_MIN 26000000 + +#endif /* LINUX_MMC_UHS2_H */ diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 39a7714605a7..d7cb1e5ecbda 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -46,7 +46,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); } \ } while (0) #define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \ - static bool __section(".data.once") __warned; \ + static bool __section(".data..once") __warned; \ int __ret_warn_once = !!(cond); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ @@ -66,7 +66,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); unlikely(__ret_warn); \ }) #define VM_WARN_ON_ONCE_FOLIO(cond, folio) ({ \ - static bool __section(".data.once") __warned; \ + static bool __section(".data..once") __warned; \ int __ret_warn_once = !!(cond); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ @@ -77,7 +77,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); unlikely(__ret_warn_once); \ }) #define VM_WARN_ON_ONCE_MM(cond, mm) ({ \ - static bool __section(".data.once") __warned; \ + static bool __section(".data..once") __warned; \ int __ret_warn_once = !!(cond); \ \ if (unlikely(__ret_warn_once && !__warned)) { \ diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index d39ebb10caeb..e2dd57ca368b 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h @@ -606,6 +606,13 @@ static inline int mmu_notifier_clear_flush_young(struct mm_struct *mm, return 0; } +static inline int mmu_notifier_clear_young(struct mm_struct *mm, + unsigned long start, + unsigned long end) +{ + return 0; +} + static inline int mmu_notifier_test_young(struct mm_struct *mm, unsigned long address) { diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 80bc5640bb60..b36124145a16 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -220,6 +220,9 @@ enum node_stat_item { PGDEMOTE_KSWAPD, PGDEMOTE_DIRECT, PGDEMOTE_KHUGEPAGED, +#ifdef CONFIG_HUGETLB_PAGE + NR_HUGETLB, +#endif NR_VM_NODE_STAT_ITEMS }; @@ -403,6 +406,8 @@ enum { NR_LRU_GEN_CAPS }; +#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset)) + 
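Looking back at the CCMD header and argument bit layouts documented in sd_uhs2.h above, here is a minimal sketch of how a host driver could compose the two 16-bit words from those defines. uhs2_ccmd_header() and uhs2_ccmd_arg() are illustrative helper names, not part of this patch:

/* Illustrative only: pack a native CCMD header and argument word
 * following the layouts above. @did is the card's node ID; the host
 * SID is assumed to be 0, as in the broadcast case. */
static u16 uhs2_ccmd_header(u8 did, u8 tid)
{
	return (did & UHS2_DEST_ID_MASK) |	/* bits [3:0]: DID */
	       UHS2_PACKET_TYPE_CCMD |		/* bits [6:4]: TYP */
	       UHS2_NATIVE_PACKET |		/* bit 7: NP */
	       ((tid << UHS2_TRANS_ID_POS) & UHS2_TRANS_ID_MASK); /* bits [10:8]: TID */
}

static u16 uhs2_ccmd_arg(u16 ioadr, u16 plen, bool write)
{
	u16 arg = (ioadr & UHS2_NATIVE_CCMD_GET_MIOADR_MASK) >>
		  UHS2_NATIVE_CCMD_LIOADR_POS;	/* bits [3:0]: MSB of IOADR */

	arg |= plen;				/* bits [5:4]: PLEN */
	if (write)
		arg |= UHS2_NATIVE_CMD_WRITE;	/* bit 7: R/W */
	arg |= (ioadr & UHS2_NATIVE_CCMD_GET_LIOADR_MASK) <<
	       UHS2_NATIVE_CCMD_LIOADR_POS;	/* bits [15:8]: LSB of IOADR */
	return arg;
}

A 4-byte control write to the PHY settings register would then be uhs2_ccmd_arg(UHS2_IOADR_PHY_SETTINGS, UHS2_NATIVE_CMD_PLEN_4B, true).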
#define MIN_LRU_BATCH BITS_PER_LONG #define MAX_LRU_BATCH (MIN_LRU_BATCH * 64) diff --git a/include/linux/module.h b/include/linux/module.h index 88ecc5e9f523..c60ee39cb9b1 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -247,7 +247,7 @@ extern void cleanup_module(void); #ifdef MODULE /* Creates an alias so file2alias.c can find device table. */ #define MODULE_DEVICE_TABLE(type, name) \ -extern typeof(name) __mod_##type##__##name##_device_table \ +extern typeof(name) __mod_device_table__##type##__##name \ __attribute__ ((unused, alias(__stringify(name)))) #else /* !MODULE */ #define MODULE_DEVICE_TABLE(type, name) @@ -367,6 +367,8 @@ enum mod_mem_type { struct module_memory { void *base; + void *rw_copy; + bool is_rox; unsigned int size; #ifdef CONFIG_MODULES_TREE_LOOKUP @@ -767,6 +769,15 @@ static inline bool is_livepatch_module(struct module *mod) void set_module_sig_enforced(void); +void *__module_writable_address(struct module *mod, void *loc); + +static inline void *module_writable_address(struct module *mod, void *loc) +{ + if (!IS_ENABLED(CONFIG_ARCH_HAS_EXECMEM_ROX) || !mod) + return loc; + return __module_writable_address(mod, loc); +} + #else /* !CONFIG_MODULES... */ static inline struct module *__module_address(unsigned long addr) @@ -874,6 +885,11 @@ static inline bool module_is_coming(struct module *mod) { return false; } + +static inline void *module_writable_address(struct module *mod, void *loc) +{ + return loc; +} #endif /* CONFIG_MODULES */ #ifdef CONFIG_SYSFS diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h index e395461d59e5..1f5507ba5a12 100644 --- a/include/linux/moduleloader.h +++ b/include/linux/moduleloader.h @@ -108,6 +108,10 @@ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod); +int module_post_finalize(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + struct module *mod); + #ifdef CONFIG_MODULES void flush_module_init_free_work(void); #else diff --git a/include/linux/mtd/nand-ecc-mxic.h b/include/linux/mtd/nand-ecc-mxic.h index b125926e458c..0da4b2999576 100644 --- a/include/linux/mtd/nand-ecc-mxic.h +++ b/include/linux/mtd/nand-ecc-mxic.h @@ -16,7 +16,7 @@ struct mxic_ecc_engine; #if IS_ENABLED(CONFIG_MTD_NAND_ECC_MXIC) && IS_REACHABLE(CONFIG_MTD_NAND_CORE) -struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void); +const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void); struct nand_ecc_engine *mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev); void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng); int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng, @@ -24,7 +24,7 @@ int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng, #else /* !CONFIG_MTD_NAND_ECC_MXIC */ -static inline struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void) +static inline const struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void) { return NULL; } diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 1e4208040956..0e2f228e8b4a 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -293,7 +293,7 @@ enum nand_ecc_engine_integration { struct nand_ecc_engine { struct device *dev; struct list_head node; - struct nand_ecc_engine_ops *ops; + const struct nand_ecc_engine_ops *ops; enum nand_ecc_engine_integration integration; void *priv; }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 8896705ccd63..ecc686409161 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ 
-52,6 +52,7 @@ #include <net/net_trackers.h> #include <net/net_debug.h> #include <net/dropreason-core.h> +#include <net/neighbour_tables.h> struct netpoll_info; struct device; @@ -343,6 +344,16 @@ struct gro_list { #define GRO_HASH_BUCKETS 8 /* + * Structure for per-NAPI config + */ +struct napi_config { + u64 gro_flush_timeout; + u64 irq_suspend_timeout; + u32 defer_hard_irqs; + unsigned int napi_id; +}; + +/* * Structure for NAPI scheduling similar to tasklet but with weighting */ struct napi_struct { @@ -373,10 +384,15 @@ struct napi_struct { unsigned int napi_id; struct hrtimer timer; struct task_struct *thread; + unsigned long gro_flush_timeout; + unsigned long irq_suspend_timeout; + u32 defer_hard_irqs; /* control-path-only fields follow */ struct list_head dev_list; struct hlist_node napi_hash_node; int irq; + int index; + struct napi_config *config; }; enum { @@ -1232,12 +1248,17 @@ struct netdev_net_notifier { * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, * const unsigned char *addr, u16 vid, u16 flags, - * struct netlink_ext_ack *extack); + * bool *notified, struct netlink_ext_ack *extack); * Adds an FDB entry to dev for addr. + * Callee shall set *notified to true if it sent any appropriate + * notification(s). Otherwise core will send a generic one. * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, - * const unsigned char *addr, u16 vid) + * const unsigned char *addr, u16 vid + * bool *notified, struct netlink_ext_ack *extack); * Deletes the FDB entry from dev corresponding to addr. + * Callee shall set *notified to true if it sent any appropriate + * notification(s). Otherwise core will send a generic one. * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, * struct netlink_ext_ack *extack); * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, @@ -1412,8 +1433,7 @@ struct net_device_ops { __be16 proto, u16 vid); #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); - int (*ndo_netpoll_setup)(struct net_device *dev, - struct netpoll_info *info); + int (*ndo_netpoll_setup)(struct net_device *dev); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, @@ -1510,12 +1530,15 @@ struct net_device_ops { const unsigned char *addr, u16 vid, u16 flags, + bool *notified, struct netlink_ext_ack *extack); int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], struct net_device *dev, const unsigned char *addr, - u16 vid, struct netlink_ext_ack *extack); + u16 vid, + bool *notified, + struct netlink_ext_ack *extack); int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, struct netlink_ext_ack *extack); @@ -1603,6 +1626,14 @@ struct net_device_ops { int (*ndo_hwtstamp_set)(struct net_device *dev, struct kernel_hwtstamp_config *kernel_config, struct netlink_ext_ack *extack); + +#if IS_ENABLED(CONFIG_NET_SHAPER) + /** + * @net_shaper_ops: Device shaping offload operations + * see include/net/net_shapers.h + */ + const struct net_shaper_ops *net_shaper_ops; +#endif }; /** @@ -1773,7 +1804,6 @@ enum netdev_reg_state { * @wireless_handlers: List of functions to handle Wireless Extensions, * instead of ioctl, * see <net/iw_handler.h> for details. 
- * @wireless_data: Instance data managed by the core of wireless extensions * * @netdev_ops: Includes several pointers to callbacks, * if one wants to override the ndo_*() functions @@ -1858,9 +1888,6 @@ enum netdev_reg_state { * allocated at register_netdev() time * @real_num_rx_queues: Number of RX queues currently active in device * @xdp_prog: XDP sockets filter program pointer - * @gro_flush_timeout: timeout for GRO layer in NAPI - * @napi_defer_hard_irqs: If not zero, provides a counter that would - * allow to avoid NIC hard IRQ, on busy queues. * * @rx_handler: handler for received packets * @rx_handler_data: XXX: need comments on this one @@ -2009,6 +2036,16 @@ enum netdev_reg_state { * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem, * where the clock is recovered. * + * @max_pacing_offload_horizon: max EDT offload horizon in nsec. + * @napi_config: An array of napi_config structures containing per-NAPI + * settings. + * @gro_flush_timeout: timeout for GRO layer in NAPI + * @napi_defer_hard_irqs: If not zero, provides a counter that + * allows the NIC hard IRQ to be avoided on busy queues. + * + * @neighbours: List heads pointing to this device's neighbours' + * dev_list, one per address-family. + * + * FIXME: cleanup struct net_device such that network protocol info * moves out. */ @@ -2074,8 +2111,6 @@ struct net_device { int ifindex; unsigned int real_num_rx_queues; struct netdev_rx_queue *_rx; - unsigned long gro_flush_timeout; - u32 napi_defer_hard_irqs; unsigned int gro_max_size; unsigned int gro_ipv4_max_size; rx_handler_func_t __rcu *rx_handler; @@ -2150,7 +2185,6 @@ struct net_device { #ifdef CONFIG_WIRELESS_EXT const struct iw_handler_def *wireless_handlers; - struct iw_public_data *wireless_data; #endif const struct ethtool_ops *ethtool_ops; #ifdef CONFIG_NET_L3_MASTER_DEV @@ -2209,6 +2243,9 @@ struct net_device { /* Protocol-specific pointers */ struct in_device __rcu *ip_ptr; + /** @fib_nh_head: nexthops associated with this netdev */ + struct hlist_head fib_nh_head; + #if IS_ENABLED(CONFIG_VLAN_8021Q) struct vlan_info __rcu *vlan_info; #endif @@ -2399,6 +2436,27 @@ struct net_device { /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */ struct dim_irq_moder *irq_moder; + u64 max_pacing_offload_horizon; + struct napi_config *napi_config; + unsigned long gro_flush_timeout; + u32 napi_defer_hard_irqs; + + /** + * @lock: protects @net_shaper_hierarchy, feel free to use for other + * netdev-scope protection. Ordering: take after rtnl_lock. 
+ */ + struct mutex lock; + +#if IS_ENABLED(CONFIG_NET_SHAPER) + /** + * @net_shaper_hierarchy: data tracking the current shaper status + * see include/net/net_shapers.h + */ + struct net_shaper_hierarchy *net_shaper_hierarchy; +#endif + + struct hlist_head neighbours[NEIGH_NR_TABLES]; + u8 priv[] ____cacheline_aligned __counted_by(priv_len); } ____cacheline_aligned; @@ -2649,6 +2707,22 @@ netif_napi_add_tx_weight(struct net_device *dev, } /** + * netif_napi_add_config - initialize a NAPI context with persistent config + * @dev: network device + * @napi: NAPI context + * @poll: polling function + * @index: the NAPI index + */ +static inline void +netif_napi_add_config(struct net_device *dev, struct napi_struct *napi, + int (*poll)(struct napi_struct *, int), int index) +{ + napi->index = index; + napi->config = &dev->napi_config[index]; + netif_napi_add_weight(dev, napi, poll, NAPI_POLL_WEIGHT); +} + +/** * netif_napi_add_tx() - initialize a NAPI context to be used for Tx only * @dev: network device * @napi: NAPI context @@ -3470,7 +3544,7 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue, * because in netdev_tx_completed_queue we update the dql_completed * before checking the XOFF flag. */ - smp_mb(); + smp_mb__after_atomic(); /* check again in case another CPU has just made room avail */ if (unlikely(dql_avail(&dev_queue->dql) >= 0)) diff --git a/include/linux/netlink.h b/include/linux/netlink.h index b332c2048c75..c3ae84a77e16 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -34,6 +34,7 @@ struct netlink_skb_parms { #define NETLINK_CB(skb) (*(struct netlink_skb_parms*)&((skb)->cb)) #define NETLINK_CREDS(skb) (&NETLINK_CB((skb)).creds) +#define NETLINK_CTX_SIZE 48 void netlink_table_grab(void); @@ -239,7 +240,7 @@ int netlink_register_notifier(struct notifier_block *nb); int netlink_unregister_notifier(struct notifier_block *nb); /* finegrained unicast helpers: */ -struct sock *netlink_getsockbyfilp(struct file *filp); +struct sock *netlink_getsockbyfd(int fd); int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk); void netlink_detachskb(struct sock *sk, struct sk_buff *skb); @@ -293,7 +294,7 @@ struct netlink_callback { int flags; bool strict_check; union { - u8 ctx[48]; + u8 ctx[NETLINK_CTX_SIZE]; /* args is deprecated. Cast a struct over ctx instead * for proper type safety. 
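The ctx comment above points at the intended pattern for NETLINK_CTX_SIZE users: a dump callback overlays a private cursor struct on cb->ctx instead of indexing the deprecated args array. A minimal sketch, where my_dump_ctx and my_dump are hypothetical names and the build-time size check is the assertion macro renamed just below:

struct my_dump_ctx {			/* hypothetical resume state */
	u32 last_ifindex;
	u64 cursor;
};

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_dump_ctx *ctx = (struct my_dump_ctx *)cb->ctx;

	/* Fails the build if the overlay outgrows ctx[NETLINK_CTX_SIZE] */
	NL_ASSERT_CTX_FITS(struct my_dump_ctx);

	/* ... emit entries past ctx->cursor, updating it for the next call ... */
	return skb->len;
}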
@@ -302,7 +303,7 @@ struct netlink_callback { }; }; -#define NL_ASSERT_DUMP_CTX_FITS(type_name) \ +#define NL_ASSERT_CTX_FITS(type_name) \ BUILD_BUG_ON(sizeof(type_name) > \ sizeof_field(struct netlink_callback, ctx)) diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index cd4e28db0cbd..b34301650c47 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -32,6 +32,7 @@ struct netpoll { bool ipv6; u16 local_port, remote_port; u8 remote_mac[ETH_ALEN]; + struct sk_buff_head skb_pool; }; struct netpoll_info { @@ -72,7 +73,7 @@ static inline void *netpoll_poll_lock(struct napi_struct *napi) { struct net_device *dev = napi->dev; - if (dev && dev->npinfo) { + if (dev && rcu_access_pointer(dev->npinfo)) { int owner = smp_processor_id(); while (cmpxchg(&napi->poll_owner, -1, owner) != -1) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 12d8e47bc5a3..559273a0f16d 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1336,7 +1336,7 @@ struct pnfs_commit_array { struct rcu_head rcu; refcount_t refcount; unsigned int nbuckets; - struct pnfs_commit_bucket buckets[]; + struct pnfs_commit_bucket buckets[] __counted_by(nbuckets); }; struct pnfs_ds_commit_info { diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h index 3982fea79919..9202f4b24343 100644 --- a/include/linux/nfslocalio.h +++ b/include/linux/nfslocalio.h @@ -55,7 +55,7 @@ struct nfsd_localio_operations { const struct cred *, const struct nfs_fh *, const fmode_t); - void (*nfsd_file_put_local)(struct nfsd_file *); + struct net *(*nfsd_file_put_local)(struct nfsd_file *); struct file *(*nfsd_file_file)(struct nfsd_file *); } ____cacheline_aligned; @@ -66,7 +66,7 @@ struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *, struct rpc_clnt *, const struct cred *, const struct nfs_fh *, const fmode_t); -static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio) +static inline void nfs_to_nfsd_net_put(struct net *net) { /* * Once reference to nfsd_serv is dropped, NFSD could be @@ -74,10 +74,22 @@ static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio) * by always taking RCU. */ rcu_read_lock(); - nfs_to->nfsd_file_put_local(localio); + nfs_to->nfsd_serv_put(net); rcu_read_unlock(); } +static inline void nfs_to_nfsd_file_put_local(struct nfsd_file *localio) +{ + /* + * Must not hold RCU otherwise nfsd_file_put() can easily trigger: + * "Voluntary context switch within RCU read-side critical section!" + * by scheduling deep in underlying filesystem (e.g. XFS). 
+ */ + struct net *net = nfs_to->nfsd_file_put_local(localio); + + nfs_to_nfsd_net_put(net); +} + #else /* CONFIG_NFS_LOCALIO */ static inline void nfsd_localio_ops_init(void) { diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 45702bdcbceb..b42e64734968 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -237,7 +237,5 @@ static inline int notifier_to_errno(int ret) #define KBD_KEYSYM 0x0004 /* Keyboard keysym */ #define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */ -extern struct blocking_notifier_head reboot_notifier_list; - #endif /* __KERNEL__ */ #endif /* _LINUX_NOTIFIER_H */ diff --git a/include/linux/nvme.h b/include/linux/nvme.h index b58d9405d65e..13377dde4527 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -327,7 +327,8 @@ struct nvme_id_ctrl { __le32 sanicap; __le32 hmminds; __le16 hmmaxd; - __u8 rsvd338[4]; + __le16 nvmsetidmax; + __le16 endgidmax; __u8 anatt; __u8 anacap; __le32 anagrpmax; @@ -388,6 +389,11 @@ enum { NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5, NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7, NVME_CTRL_CTRATT_UUID_LIST = 1 << 9, + NVME_CTRL_SGLS_BYTE_ALIGNED = 1, + NVME_CTRL_SGLS_DWORD_ALIGNED = 2, + NVME_CTRL_SGLS_KSDBDS = 1 << 2, + NVME_CTRL_SGLS_MSDS = 1 << 19, + NVME_CTRL_SGLS_SAOS = 1 << 20, }; struct nvme_lbaf { @@ -522,6 +528,7 @@ enum { NVME_ID_CNS_NS_DESC_LIST = 0x03, NVME_ID_CNS_CS_NS = 0x05, NVME_ID_CNS_CS_CTRL = 0x06, + NVME_ID_CNS_NS_ACTIVE_LIST_CS = 0x07, NVME_ID_CNS_NS_CS_INDEP = 0x08, NVME_ID_CNS_NS_PRESENT_LIST = 0x10, NVME_ID_CNS_NS_PRESENT = 0x11, @@ -530,6 +537,7 @@ enum { NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15, NVME_ID_CNS_NS_GRANULARITY = 0x16, NVME_ID_CNS_UUID_LIST = 0x17, + NVME_ID_CNS_ENDGRP_LIST = 0x19, }; enum { @@ -560,6 +568,8 @@ enum { NVME_NS_FLBAS_LBA_SHIFT = 1, NVME_NS_FLBAS_META_EXT = 0x10, NVME_NS_NMIC_SHARED = 1 << 0, + NVME_NS_ROTATIONAL = 1 << 4, + NVME_NS_VWC_NOT_PRESENT = 1 << 5, NVME_LBAF_RP_BEST = 0, NVME_LBAF_RP_BETTER = 1, NVME_LBAF_RP_GOOD = 2, @@ -617,6 +627,40 @@ enum { NVME_NIDT_CSI = 0x04, }; +struct nvme_endurance_group_log { + __u8 egcw; + __u8 egfeat; + __u8 rsvd2; + __u8 avsp; + __u8 avspt; + __u8 pused; + __le16 did; + __u8 rsvd8[24]; + __u8 ee[16]; + __u8 dur[16]; + __u8 duw[16]; + __u8 muw[16]; + __u8 hrc[16]; + __u8 hwc[16]; + __u8 mdie[16]; + __u8 neile[16]; + __u8 tegcap[16]; + __u8 uegcap[16]; + __u8 rsvd192[320]; +}; + +struct nvme_rotational_media_log { + __le16 endgid; + __le16 numa; + __le16 nrs; + __u8 rsvd6[2]; + __le32 spinc; + __le32 fspinc; + __le32 ldc; + __le32 fldc; + __u8 rsvd24[488]; +}; + struct nvme_smart_log { __u8 critical_warning; __u8 temperature[2]; @@ -1244,6 +1288,7 @@ enum { NVME_FEAT_WRITE_PROTECT = 0x84, NVME_FEAT_VENDOR_START = 0xC0, NVME_FEAT_VENDOR_END = 0xFF, + NVME_LOG_SUPPORTED = 0x00, NVME_LOG_ERROR = 0x01, NVME_LOG_SMART = 0x02, NVME_LOG_FW_SLOT = 0x03, @@ -1254,6 +1299,8 @@ enum { NVME_LOG_TELEMETRY_CTRL = 0x08, NVME_LOG_ENDURANCE_GROUP = 0x09, NVME_LOG_ANA = 0x0c, + NVME_LOG_FEATURES = 0x12, + NVME_LOG_RMI = 0x16, NVME_LOG_DISC = 0x70, NVME_LOG_RESERVATION = 0x80, NVME_FWACT_REPL = (0 << 3), @@ -1261,6 +1308,24 @@ enum { NVME_FWACT_ACTV = (2 << 3), }; +struct nvme_supported_log { + __le32 lids[256]; +}; + +enum { + NVME_LIDS_LSUPP = 1 << 0, +}; + +struct nvme_supported_features_log { + __le32 fis[256]; +}; + +enum { + NVME_FIS_FSUPP = 1 << 0, + NVME_FIS_NSCPE = 1 << 20, + NVME_FIS_CSCPE = 1 << 21, +}; + /* NVMe Namespace Write Protect State */ enum { NVME_NS_NO_WRITE_PROTECT = 0, @@ 
-1281,7 +1346,8 @@ struct nvme_identify { __u8 cns; __u8 rsvd3; __le16 ctrlid; - __u8 rsvd11[3]; + __le16 cnssid; + __u8 rsvd11; __u8 csi; __u32 rsvd12[4]; }; @@ -1389,7 +1455,7 @@ struct nvme_get_log_page_command { __u8 lsp; /* upper 4 bits reserved */ __le16 numdl; __le16 numdu; - __u16 rsvd11; + __le16 lsi; union { struct { __le32 lpol; @@ -2037,4 +2103,81 @@ struct nvme_completion { #define NVME_MINOR(ver) (((ver) >> 8) & 0xff) #define NVME_TERTIARY(ver) ((ver) & 0xff) +enum { + NVME_AEN_RESV_LOG_PAGE_AVALIABLE = 0x00, +}; + +enum { + NVME_PR_LOG_EMPTY_LOG_PAGE = 0x00, + NVME_PR_LOG_REGISTRATION_PREEMPTED = 0x01, + NVME_PR_LOG_RESERVATION_RELEASED = 0x02, + NVME_PR_LOG_RESERVATOIN_PREEMPTED = 0x03, +}; + +enum { + NVME_PR_NOTIFY_BIT_REG_PREEMPTED = 1, + NVME_PR_NOTIFY_BIT_RESV_RELEASED = 2, + NVME_PR_NOTIFY_BIT_RESV_PREEMPTED = 3, +}; + +struct nvme_pr_log { + __le64 count; + __u8 type; + __u8 nr_pages; + __u8 rsvd1[2]; + __le32 nsid; + __u8 rsvd2[48]; +}; + +struct nvmet_pr_register_data { + __le64 crkey; + __le64 nrkey; +}; + +struct nvmet_pr_acquire_data { + __le64 crkey; + __le64 prkey; +}; + +struct nvmet_pr_release_data { + __le64 crkey; +}; + +enum nvme_pr_capabilities { + NVME_PR_SUPPORT_PTPL = 1, + NVME_PR_SUPPORT_WRITE_EXCLUSIVE = 1 << 1, + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS = 1 << 2, + NVME_PR_SUPPORT_WRITE_EXCLUSIVE_REG_ONLY = 1 << 3, + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_REG_ONLY = 1 << 4, + NVME_PR_SUPPORT_WRITE_EXCLUSIVE_ALL_REGS = 1 << 5, + NVME_PR_SUPPORT_EXCLUSIVE_ACCESS_ALL_REGS = 1 << 6, + NVME_PR_SUPPORT_IEKEY_VER_1_3_DEF = 1 << 7, +}; + +enum nvme_pr_register_action { + NVME_PR_REGISTER_ACT_REG = 0, + NVME_PR_REGISTER_ACT_UNREG = 1, + NVME_PR_REGISTER_ACT_REPLACE = 1 << 1, +}; + +enum nvme_pr_acquire_action { + NVME_PR_ACQUIRE_ACT_ACQUIRE = 0, + NVME_PR_ACQUIRE_ACT_PREEMPT = 1, + NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT = 1 << 1, +}; + +enum nvme_pr_release_action { + NVME_PR_RELEASE_ACT_RELEASE = 0, + NVME_PR_RELEASE_ACT_CLEAR = 1, +}; + +enum nvme_pr_change_ptpl { + NVME_PR_CPTPL_NO_CHANGE = 0, + NVME_PR_CPTPL_RESV = 1 << 30, + NVME_PR_CPTPL_CLEARED = 2 << 30, + NVME_PR_CPTPL_PERSIST = 3 << 30, +}; + +#define NVME_PR_IGNORE_KEY (1 << 3) + #endif /* _LINUX_NVME_H */ diff --git a/include/linux/of.h b/include/linux/of.h index 85b60ac9eec5..086a60f3b8a6 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -357,7 +357,7 @@ extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); extern struct device_node *of_cpu_device_node_get(int cpu); extern int of_cpu_node_to_id(struct device_node *np); extern struct device_node *of_get_next_cpu_node(struct device_node *prev); -extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node, +extern struct device_node *of_get_cpu_state_node(const struct device_node *cpu_node, int index); extern u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread); @@ -395,7 +395,7 @@ extern int of_phandle_iterator_args(struct of_phandle_iterator *it, int size); extern void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)); -extern int of_alias_get_id(struct device_node *np, const char *stem); +extern int of_alias_get_id(const struct device_node *np, const char *stem); extern int of_alias_get_highest_id(const char *stem); bool of_machine_compatible_match(const char *const *compats); @@ -435,7 +435,7 @@ extern int of_detach_node(struct device_node *); * of_property_for_each_u32(np, "propname", u) * printk("U32 value: %x\n", u); */ -const __be32 *of_prop_next_u32(struct property *prop, const __be32 
*cur, +const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur, u32 *pu); /* * struct property *prop; @@ -444,11 +444,11 @@ const __be32 *of_prop_next_u32(struct property *prop, const __be32 *cur, * of_property_for_each_string(np, "propname", prop, s) * printk("String value: %s\n", s); */ -const char *of_prop_next_string(struct property *prop, const char *cur); +const char *of_prop_next_string(const struct property *prop, const char *cur); -bool of_console_check(struct device_node *dn, char *name, int index); +bool of_console_check(const struct device_node *dn, char *name, int index); -int of_map_id(struct device_node *np, u32 id, +int of_map_id(const struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out); @@ -826,13 +826,13 @@ static inline bool of_console_check(const struct device_node *dn, const char *na return false; } -static inline const __be32 *of_prop_next_u32(struct property *prop, +static inline const __be32 *of_prop_next_u32(const struct property *prop, const __be32 *cur, u32 *pu) { return NULL; } -static inline const char *of_prop_next_string(struct property *prop, +static inline const char *of_prop_next_string(const struct property *prop, const char *cur) { return NULL; @@ -871,7 +871,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag { } -static inline int of_map_id(struct device_node *np, u32 id, +static inline int of_map_id(const struct device_node *np, u32 id, const char *map_name, const char *map_mask_name, struct device_node **target, u32 *id_out) { @@ -899,7 +899,7 @@ static inline const void *of_device_get_match_data(const struct device *dev) #define of_node_cmp(s1, s2) strcasecmp((s1), (s2)) #endif -static inline int of_prop_val_eq(struct property *p1, struct property *p2) +static inline int of_prop_val_eq(const struct property *p1, const struct property *p2) { return p1->length == p2->length && !memcmp(p1->value, p2->value, (size_t)p1->length); @@ -1252,7 +1252,7 @@ static inline int of_property_read_string_index(const struct device_node *np, static inline bool of_property_read_bool(const struct device_node *np, const char *propname) { - struct property *prop = of_find_property(np, propname, NULL); + const struct property *prop = of_find_property(np, propname, NULL); return prop ? 
true : false; } @@ -1430,7 +1430,7 @@ static inline int of_property_read_s32(const struct device_node *np, err = of_phandle_iterator_next(it)) #define of_property_for_each_u32(np, propname, u) \ - for (struct {struct property *prop; const __be32 *item; } _it = \ + for (struct {const struct property *prop; const __be32 *item; } _it = \ {of_find_property(np, propname, NULL), \ of_prop_next_u32(_it.prop, NULL, &u)}; \ _it.item; \ @@ -1734,7 +1734,7 @@ struct of_overlay_notify_data { #ifdef CONFIG_OF_OVERLAY int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, - int *ovcs_id, struct device_node *target_base); + int *ovcs_id, const struct device_node *target_base); int of_overlay_remove(int *ovcs_id); int of_overlay_remove_all(void); @@ -1744,7 +1744,7 @@ int of_overlay_notifier_unregister(struct notifier_block *nb); #else static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size, - int *ovcs_id, struct device_node *target_base) + int *ovcs_id, const struct device_node *target_base) { return -ENOTSUPP; } diff --git a/include/linux/of_address.h b/include/linux/of_address.h index 26a19daf0d09..9e034363788a 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -10,7 +10,7 @@ struct of_bus; struct of_pci_range_parser { struct device_node *node; - struct of_bus *bus; + const struct of_bus *bus; const __be32 *range; const __be32 *end; int na; @@ -83,8 +83,8 @@ extern struct of_pci_range *of_pci_range_parser_one( struct of_pci_range *range); extern int of_pci_address_to_resource(struct device_node *dev, int bar, struct resource *r); -extern int of_pci_range_to_resource(struct of_pci_range *range, - struct device_node *np, +extern int of_pci_range_to_resource(const struct of_pci_range *range, + const struct device_node *np, struct resource *res); extern int of_range_to_resource(struct device_node *np, int index, struct resource *res); diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index d69ad5bb1eb1..b8d6c0c20876 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -31,6 +31,7 @@ extern void *of_fdt_unflatten_tree(const unsigned long *blob, extern int __initdata dt_root_addr_cells; extern int __initdata dt_root_size_cells; extern void *initial_boot_params; +extern phys_addr_t initial_boot_params_pa; extern char __dtb_start[]; extern char __dtb_end[]; @@ -70,8 +71,8 @@ extern u64 dt_mem_next_cell(int s, const __be32 **cellp); /* Early flat tree scan hooks */ extern int early_init_dt_scan_root(void); -extern bool early_init_dt_scan(void *params); -extern bool early_init_dt_verify(void *params); +extern bool early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys); +extern bool early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys); extern void early_init_dt_scan_nodes(void); extern const char *of_flat_dt_get_machine_name(void); diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index a4bea62bfa29..a692d9d979a6 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h @@ -11,6 +11,7 @@ #ifndef __LINUX_OF_GRAPH_H #define __LINUX_OF_GRAPH_H +#include <linux/cleanup.h> #include <linux/types.h> #include <linux/errno.h> @@ -37,14 +38,43 @@ struct of_endpoint { for (child = of_graph_get_next_endpoint(parent, NULL); child != NULL; \ child = of_graph_get_next_endpoint(parent, child)) +/** + * for_each_of_graph_port - iterate over every port in a device or ports node + * @parent: parent device or ports node containing port + * @child: loop variable pointing to the current port node + * + * When 
breaking out of the loop while continuing to use @child, you need to + * use return_ptr(@child) or no_free_ptr(@child) so that __free() is not called on it. + */ +#define for_each_of_graph_port(parent, child) \ + for (struct device_node *child __free(device_node) = of_graph_get_next_port(parent, NULL);\ + child != NULL; child = of_graph_get_next_port(parent, child)) + +/** + * for_each_of_graph_port_endpoint - iterate over every endpoint in a port node + * @parent: parent port node + * @child: loop variable pointing to the current endpoint node + * + * When breaking out of the loop while continuing to use @child, you need to + * use return_ptr(@child) or no_free_ptr(@child) so that __free() is not called on it. + */ +#define for_each_of_graph_port_endpoint(parent, child) \ + for (struct device_node *child __free(device_node) = of_graph_get_next_port_endpoint(parent, NULL);\ + child != NULL; child = of_graph_get_next_port_endpoint(parent, child)) + #ifdef CONFIG_OF bool of_graph_is_present(const struct device_node *node); int of_graph_parse_endpoint(const struct device_node *node, struct of_endpoint *endpoint); unsigned int of_graph_get_endpoint_count(const struct device_node *np); +unsigned int of_graph_get_port_count(struct device_node *np); struct device_node *of_graph_get_port_by_id(struct device_node *node, u32 id); struct device_node *of_graph_get_next_endpoint(const struct device_node *parent, struct device_node *previous); +struct device_node *of_graph_get_next_port(const struct device_node *parent, + struct device_node *port); +struct device_node *of_graph_get_next_port_endpoint(const struct device_node *port, + struct device_node *prev); struct device_node *of_graph_get_endpoint_by_regs( const struct device_node *parent, int port_reg, int reg); struct device_node *of_graph_get_remote_endpoint( @@ -73,6 +103,11 @@ static inline unsigned int of_graph_get_endpoint_count(const struct device_node return 0; } +static inline unsigned int of_graph_get_port_count(struct device_node *np) +{ + return 0; +} + static inline struct device_node *of_graph_get_port_by_id( struct device_node *node, u32 id) { @@ -86,6 +121,20 @@ static inline struct device_node *of_graph_get_next_endpoint( return NULL; } +static inline struct device_node *of_graph_get_next_port( + const struct device_node *parent, + struct device_node *previous) +{ + return NULL; +} + +static inline struct device_node *of_graph_get_next_port_endpoint( + const struct device_node *parent, + struct device_node *previous) +{ + return NULL; +} + static inline struct device_node *of_graph_get_endpoint_by_regs( const struct device_node *parent, int port_reg, int reg) { diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h index d6d3eae2f145..6337ad4e5fe8 100644 --- a/include/linux/of_irq.h +++ b/include/linux/of_irq.h @@ -48,12 +48,12 @@ extern int of_irq_to_resource_table(struct device_node *dev, struct resource *res, int nr_irqs); extern struct device_node *of_irq_find_parent(struct device_node *child); extern struct irq_domain *of_msi_get_domain(struct device *dev, - struct device_node *np, + const struct device_node *np, enum irq_domain_bus_token token); extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev, u32 id, u32 bus_token); -extern void of_msi_configure(struct device *dev, struct device_node *np); +extern void of_msi_configure(struct device *dev, const struct device_node *np); u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in); #else static inline void of_irq_init(const struct of_device_id 
*matches) diff --git a/include/linux/once.h b/include/linux/once.h index bc714d414448..30346fcdc799 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -46,7 +46,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key, #define DO_ONCE(func, ...) \ ({ \ bool ___ret = false; \ - static bool __section(".data.once") ___done = false; \ + static bool __section(".data..once") ___done = false; \ static DEFINE_STATIC_KEY_TRUE(___once_key); \ if (static_branch_unlikely(&___once_key)) { \ unsigned long ___flags; \ @@ -64,7 +64,7 @@ void __do_once_sleepable_done(bool *done, struct static_key_true *once_key, #define DO_ONCE_SLEEPABLE(func, ...) \ ({ \ bool ___ret = false; \ - static bool __section(".data.once") ___done = false; \ + static bool __section(".data..once") ___done = false; \ static DEFINE_STATIC_KEY_TRUE(___once_key); \ if (static_branch_unlikely(&___once_key)) { \ ___ret = __do_once_sleepable_start(&___done); \ diff --git a/include/linux/once_lite.h b/include/linux/once_lite.h index b7bce4983638..27de7bc32a06 100644 --- a/include/linux/once_lite.h +++ b/include/linux/once_lite.h @@ -12,7 +12,7 @@ #define __ONCE_LITE_IF(condition) \ ({ \ - static bool __section(".data.once") __already_done; \ + static bool __section(".data..once") __already_done; \ bool __ret_cond = !!(condition); \ bool __ret_once = false; \ \ diff --git a/include/linux/oom.h b/include/linux/oom.h index 7d0c9c48a0c5..1e0fc6931ce9 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h @@ -7,7 +7,6 @@ #include <linux/types.h> #include <linux/nodemask.h> #include <uapi/linux/oom.h> -#include <linux/sched/coredump.h> /* MMF_* */ #include <linux/mm.h> /* VM_FAULT* */ struct zonelist; diff --git a/include/linux/packing.h b/include/linux/packing.h index 8d6571feb95d..5d36dcd06f60 100644 --- a/include/linux/packing.h +++ b/include/linux/packing.h @@ -17,33 +17,13 @@ enum packing_op { UNPACK, }; -/** - * packing - Convert numbers (currently u64) between a packed and an unpacked - * format. Unpacked means laid out in memory in the CPU's native - * understanding of integers, while packed means anything else that - * requires translation. - * - * @pbuf: Pointer to a buffer holding the packed value. - * @uval: Pointer to an u64 holding the unpacked value. - * @startbit: The index (in logical notation, compensated for quirks) where - * the packed value starts within pbuf. Must be larger than, or - * equal to, endbit. - * @endbit: The index (in logical notation, compensated for quirks) where - * the packed value ends within pbuf. Must be smaller than, or equal - * to, startbit. - * @op: If PACK, then uval will be treated as const pointer and copied (packed) - * into pbuf, between startbit and endbit. - * If UNPACK, then pbuf will be treated as const pointer and the logical - * value between startbit and endbit will be copied (unpacked) to uval. - * @quirks: A bit mask of QUIRK_LITTLE_ENDIAN, QUIRK_LSW32_IS_FIRST and - * QUIRK_MSB_ON_THE_RIGHT. - * - * Return: 0 on success, EINVAL or ERANGE if called incorrectly. Assuming - * correct usage, return code may be discarded. - * If op is PACK, pbuf is modified. - * If op is UNPACK, uval is modified. 
- */ int packing(void *pbuf, u64 *uval, int startbit, int endbit, size_t pbuflen, enum packing_op op, u8 quirks); +int pack(void *pbuf, u64 uval, size_t startbit, size_t endbit, size_t pbuflen, + u8 quirks); + +int unpack(const void *pbuf, u64 *uval, size_t startbit, size_t endbit, + size_t pbuflen, u8 quirks); + #endif diff --git a/include/linux/page-flags-layout.h b/include/linux/page-flags-layout.h index 7d79818dc065..4f5c9e979bb9 100644 --- a/include/linux/page-flags-layout.h +++ b/include/linux/page-flags-layout.h @@ -111,5 +111,12 @@ ZONES_WIDTH - LRU_GEN_WIDTH - SECTIONS_WIDTH - \ NODES_WIDTH - KASAN_TAG_WIDTH - LAST_CPUPID_WIDTH) +#define NR_NON_PAGEFLAG_BITS (SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH + \ + LAST_CPUPID_SHIFT + KASAN_TAG_WIDTH + \ + LRU_GEN_WIDTH + LRU_REFS_WIDTH) + +#define NR_UNUSED_PAGEFLAG_BITS (BITS_PER_LONG - \ + (NR_NON_PAGEFLAG_BITS + NR_PAGEFLAGS)) + #endif #endif /* _LINUX_PAGE_FLAGS_LAYOUT */ diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index cc839e4365c1..2220bfec278e 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -543,7 +543,7 @@ FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE) * - PG_private and PG_private_2 cause release_folio() and co to be invoked */ PAGEFLAG(Private, private, PF_ANY) -PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) +FOLIO_FLAG(private_2, FOLIO_HEAD_PAGE) /* owner_2 can be set on tail pages for anon memory */ FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE) @@ -554,7 +554,7 @@ FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE) */ TESTPAGEFLAG(Writeback, writeback, PF_NO_TAIL) TESTSCFLAG(Writeback, writeback, PF_NO_TAIL) -PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) +FOLIO_FLAG(mappedtodisk, FOLIO_HEAD_PAGE) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) @@ -689,6 +689,13 @@ static __always_inline bool folio_test_anon(const struct folio *folio) return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0; } +static __always_inline bool PageAnonNotKsm(const struct page *page) +{ + unsigned long flags = (unsigned long)page_folio(page)->mapping; + + return (flags & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON; +} + static __always_inline bool PageAnon(const struct page *page) { return folio_test_anon(page_folio(page)); @@ -718,13 +725,8 @@ static __always_inline bool folio_test_ksm(const struct folio *folio) return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM; } - -static __always_inline bool PageKsm(const struct page *page) -{ - return folio_test_ksm(page_folio(page)); -} #else -TESTPAGEFLAG_FALSE(Ksm, ksm) +FOLIO_TEST_FLAG_FALSE(ksm) #endif u64 stable_page_flags(const struct page *page); @@ -1137,14 +1139,14 @@ static __always_inline int PageAnonExclusive(const struct page *page) static __always_inline void SetPageAnonExclusive(struct page *page) { - VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page); + VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page); VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); set_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } static __always_inline void ClearPageAnonExclusive(struct page *page) { - VM_BUG_ON_PGFLAGS(!PageAnon(page) || PageKsm(page), page); + VM_BUG_ON_PGFLAGS(!PageAnonNotKsm(page), page); VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags); } diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index c16db0067090..73dc2c1841ec 100644 --- 
a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -3,10 +3,6 @@
 #define __LINUX_PAGEISOLATION_H
 
 #ifdef CONFIG_MEMORY_ISOLATION
-static inline bool has_isolate_pageblock(struct zone *zone)
-{
-	return zone->nr_isolate_pageblock;
-}
 static inline bool is_migrate_isolate_page(struct page *page)
 {
 	return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -16,10 +12,6 @@ static inline bool is_migrate_isolate(int migratetype)
 {
 	return migratetype == MIGRATE_ISOLATE;
 }
 #else
-static inline bool has_isolate_pageblock(struct zone *zone)
-{
-	return false;
-}
 static inline bool is_migrate_isolate_page(struct page *page)
 {
 	return false;
diff --git a/include/linux/page_frag_cache.h b/include/linux/page_frag_cache.h
new file mode 100644
index 000000000000..41a91df82631
--- /dev/null
+++ b/include/linux/page_frag_cache.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_PAGE_FRAG_CACHE_H
+#define _LINUX_PAGE_FRAG_CACHE_H
+
+#include <linux/bits.h>
+#include <linux/log2.h>
+#include <linux/mm_types_task.h>
+#include <linux/types.h>
+
+#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
+/* Use a full byte here to enable assembler optimization, as the shift
+ * operation usually expects a byte-sized operand.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK		GENMASK(7, 0)
+#else
+/* The compiler should be able to optimize the read away, since any value
+ * ANDed with 0 is 0.
+ */
+#define PAGE_FRAG_CACHE_ORDER_MASK		0
+#endif
+
+#define PAGE_FRAG_CACHE_PFMEMALLOC_BIT		(PAGE_FRAG_CACHE_ORDER_MASK + 1)
+
+static inline bool encoded_page_decode_pfmemalloc(unsigned long encoded_page)
+{
+	return !!(encoded_page & PAGE_FRAG_CACHE_PFMEMALLOC_BIT);
+}
+
+static inline void page_frag_cache_init(struct page_frag_cache *nc)
+{
+	nc->encoded_page = 0;
+}
+
+static inline bool page_frag_cache_is_pfmemalloc(struct page_frag_cache *nc)
+{
+	return encoded_page_decode_pfmemalloc(nc->encoded_page);
+}
+
+void page_frag_cache_drain(struct page_frag_cache *nc);
+void __page_frag_cache_drain(struct page *page, unsigned int count);
+void *__page_frag_alloc_align(struct page_frag_cache *nc, unsigned int fragsz,
+			      gfp_t gfp_mask, unsigned int align_mask);
+
+static inline void *page_frag_alloc_align(struct page_frag_cache *nc,
+					  unsigned int fragsz, gfp_t gfp_mask,
+					  unsigned int align)
+{
+	WARN_ON_ONCE(!is_power_of_2(align));
+	return __page_frag_alloc_align(nc, fragsz, gfp_mask, -align);
+}
+
+static inline void *page_frag_alloc(struct page_frag_cache *nc,
+				    unsigned int fragsz, gfp_t gfp_mask)
+{
+	return __page_frag_alloc_align(nc, fragsz, gfp_mask, ~0u);
+}
+
+void page_frag_free(void *addr);
+
+#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 68a5f1ff3301..bcf0865a38ae 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -1011,22 +1011,25 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
 	return read_cache_folio(mapping, index, NULL, file);
 }
 
-/*
- * Get the offset in PAGE_SIZE (even for hugetlb pages).
+/**
+ * page_pgoff - Calculate the logical page offset of this page.
+ * @folio: The folio containing this page.
+ * @page: The page which we need the offset of.
+ *
+ * For file pages, this is the offset from the beginning of the file
+ * in units of PAGE_SIZE. For anonymous pages, this is the offset from
+ * the beginning of the anon_vma in units of PAGE_SIZE. This will
+ * return nonsense for KSM pages.
+ *
+ * Context: Caller must have a reference on the folio or otherwise
+ * prevent it from being split or freed.
+ *
+ * Return: The offset in units of PAGE_SIZE.
+ */
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_pgoff(const struct folio *folio,
+		const struct page *page)
 {
-	struct page *head;
-
-	if (likely(!PageTransTail(page)))
-		return page->index;
-
-	head = compound_head(page);
-	/*
-	 * We don't initialize ->index for tail pages: calculate based on
-	 * head page
-	 */
-	return head->index + page - head;
+	return folio->index + folio_page_idx(folio, page);
 }
 
 /*
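[Editor's note: a short usage sketch for the page_frag allocator declared in the new page_frag_cache.h above; the cache instance, fragment size, and GFP flags are illustrative only.]

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/page_frag_cache.h>

/* A per-driver fragment cache; zero-initialized statics are already in the
 * state page_frag_cache_init() would produce. */
static struct page_frag_cache demo_frag_cache;

static void *demo_alloc_frag(void)
{
	/* Carve a 256-byte fragment, aligned to SMP_CACHE_BYTES. */
	return page_frag_alloc_align(&demo_frag_cache, 256, GFP_ATOMIC,
				     SMP_CACHE_BYTES);
}

/* Callers release individual fragments with page_frag_free(ptr); the whole
 * cache is released with page_frag_cache_drain(&demo_frag_cache). */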
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index f5eb5a32aeed..9700a29f8afb 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -25,12 +25,15 @@ enum page_walk_lock {
 *			this handler is required to be able to handle
 *			pmd_trans_huge() pmds.  They may simply choose to
 *			split_huge_page() instead of handling it explicitly.
- * @pte_entry:		if set, called for each PTE (lowest-level) entry,
- *			including empty ones
+ * @pte_entry:		if set, called for each PTE (lowest-level) entry,
+ *			including empty ones. If @install_pte is set,
+ *			@pte_entry is called only for existing PTEs.
 * @pte_hole:		if set, called for each hole at all levels,
 *			depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD.
 *			Any folded depths (where PTRS_PER_P?D is equal to 1)
-*			are skipped.
+*			are skipped. If @install_pte is specified, this will
+*			not trigger for any populated ranges.
 * @hugetlb_entry:	if set, called for each hugetlb entry. This hook
 *			function is called with the vma lock held, in order to
 *			protect against a concurrent freeing of the pte_t* or
@@ -51,6 +54,13 @@ enum page_walk_lock {
 * @pre_vma:            if set, called before starting walk on a non-null vma.
 * @post_vma:           if set, called after a walk on a non-null vma, provided
 *                      that @pre_vma and the vma walk succeeded.
+ * @install_pte:        if set, missing page table entries are installed and
+ *                      thus all levels are always walked in the specified
+ *                      range. This callback is then invoked at the PTE level
+ *                      (any THP pages having been split beforehand), providing
+ *                      the PTE to install. If allocations fail, the walk is
+ *                      aborted. This operation is only available for userland
+ *                      memory and is not usable for hugetlb ranges.
 *
 * p?d_entry callbacks are called even if those levels are folded on a
 * particular architecture/configuration.
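[Editor's note: a hedged sketch of the @install_pte contract described above, filling every hole in a userspace range with a caller-chosen PTE. The callback name, ops instance, and the use of set_pte_at() are assumptions made for illustration, not taken from this patch.]

#include <linux/pagewalk.h>
#include <linux/pgtable.h>

/* Runs only for holes; any THPs in the range have already been split. */
static int demo_install_pte(unsigned long addr, unsigned long next,
			    pte_t *ptep, struct mm_walk *walk)
{
	/* walk->private carries the pte value chosen by the caller. */
	set_pte_at(walk->mm, addr, ptep, *(pte_t *)walk->private);
	return 0;	/* a nonzero return aborts the walk */
}

static const struct mm_walk_ops demo_install_ops = {
	.install_pte	= demo_install_pte,
	.walk_lock	= PGWALK_WRLOCK,
};

/* Driven by a walk over [start, end) of a userspace VMA, e.g.
 * walk_page_range(mm, start, end, &demo_install_ops, &pte); whether a given
 * walk entry point honors @install_pte is an assumption here. */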
@@ -76,6 +86,8 @@ struct mm_walk_ops { int (*pre_vma)(unsigned long start, unsigned long end, struct mm_walk *walk); void (*post_vma)(struct mm_walk *walk); + int (*install_pte)(unsigned long addr, unsigned long next, + pte_t *ptep, struct mm_walk *walk); enum page_walk_lock walk_lock; }; diff --git a/include/linux/pci-bwctrl.h b/include/linux/pci-bwctrl.h new file mode 100644 index 000000000000..cee07127455b --- /dev/null +++ b/include/linux/pci-bwctrl.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * PCIe bandwidth controller + * + * Copyright (C) 2023-2024 Intel Corporation + */ + +#ifndef LINUX_PCI_BWCTRL_H +#define LINUX_PCI_BWCTRL_H + +#include <linux/pci.h> + +struct thermal_cooling_device; + +#ifdef CONFIG_PCIE_THERMAL +struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port); +void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev); +#else +static inline struct thermal_cooling_device *pcie_cooling_device_register(struct pci_dev *port) +{ + return NULL; +} +static inline void pcie_cooling_device_unregister(struct thermal_cooling_device *cdev) +{ +} +#endif + +#endif diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 42ef06136bd1..e818e3fdcded 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -33,10 +33,42 @@ pci_epc_interface_string(enum pci_epc_interface_type type) } /** + * struct pci_epc_map - information about EPC memory for mapping a RC PCI + * address range + * @pci_addr: start address of the RC PCI address range to map + * @pci_size: size of the RC PCI address range mapped from @pci_addr + * @map_pci_addr: RC PCI address used as the first address mapped (may be lower + * than @pci_addr) + * @map_size: size of the controller memory needed for mapping the RC PCI address + * range @map_pci_addr..@pci_addr+@pci_size + * @phys_base: base physical address of the allocated EPC memory for mapping the + * RC PCI address range + * @phys_addr: physical address at which @pci_addr is mapped + * @virt_base: base virtual address of the allocated EPC memory for mapping the + * RC PCI address range + * @virt_addr: virtual address at which @pci_addr is mapped + */ +struct pci_epc_map { + u64 pci_addr; + size_t pci_size; + + u64 map_pci_addr; + size_t map_size; + + phys_addr_t phys_base; + phys_addr_t phys_addr; + void __iomem *virt_base; + void __iomem *virt_addr; +}; + +/** * struct pci_epc_ops - set of function pointers for performing EPC operations * @write_header: ops to populate configuration space header * @set_bar: ops to configure the BAR * @clear_bar: ops to reset the BAR + * @align_addr: operation to get the mapping address, mapping size and offset + * into a controller memory window needed to map an RC PCI address + * region * @map_addr: ops to map CPU address to PCI address * @unmap_addr: ops to unmap CPU address and PCI address * @set_msi: ops to set the requested number of MSI interrupts in the MSI @@ -61,6 +93,8 @@ struct pci_epc_ops { struct pci_epf_bar *epf_bar); void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); + u64 (*align_addr)(struct pci_epc *epc, u64 pci_addr, size_t *size, + size_t *offset); int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr, u64 pci_addr, size_t size); void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, @@ -278,6 +312,10 @@ void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc, phys_addr_t *phys_addr, size_t size); void pci_epc_mem_free_addr(struct pci_epc *epc, 
phys_addr_t phys_addr,
			   void __iomem *virt_addr, size_t size);
+int pci_epc_mem_map(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+		    u64 pci_addr, size_t pci_size, struct pci_epc_map *map);
+void pci_epc_mem_unmap(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
+		       struct pci_epc_map *map);
 
 #else
 static inline void pci_epc_init_notify(struct pci_epc *epc)
diff --git a/include/linux/pci-pwrctl.h b/include/linux/pci-pwrctrl.h
index 0d23dddf59ec..7d439b0675e9 100644
--- a/include/linux/pci-pwrctl.h
+++ b/include/linux/pci-pwrctrl.h
@@ -3,8 +3,8 @@
  * Copyright (C) 2024 Linaro Ltd.
  */
 
-#ifndef __PCI_PWRCTL_H__
-#define __PCI_PWRCTL_H__
+#ifndef __PCI_PWRCTRL_H__
+#define __PCI_PWRCTRL_H__
 
 #include <linux/notifier.h>
 #include <linux/workqueue.h>
@@ -29,14 +29,14 @@ struct device_link;
  */
 
 /**
- * struct pci_pwrctl - PCI device power control context.
+ * struct pci_pwrctrl - PCI device power control context.
  * @dev: Address of the power controlling device.
  *
  * An object of this type must be allocated by the PCI power control device and
- * passed to the pwrctl subsystem to trigger a bus rescan and setup a device
+ * passed to the pwrctrl subsystem to trigger a bus rescan and set up a device
  * link with the device once it's up.
 */
-struct pci_pwrctl {
+struct pci_pwrctrl {
 	struct device *dev;
 
 	/* Private: don't use. */
@@ -45,10 +45,10 @@ struct pci_pwrctl {
 	struct work_struct work;
 };
 
-void pci_pwrctl_init(struct pci_pwrctl *pwrctl, struct device *dev);
-int pci_pwrctl_device_set_ready(struct pci_pwrctl *pwrctl);
-void pci_pwrctl_device_unset_ready(struct pci_pwrctl *pwrctl);
-int devm_pci_pwrctl_device_set_ready(struct device *dev,
-				     struct pci_pwrctl *pwrctl);
+void pci_pwrctrl_init(struct pci_pwrctrl *pwrctrl, struct device *dev);
+int pci_pwrctrl_device_set_ready(struct pci_pwrctrl *pwrctrl);
+void pci_pwrctrl_device_unset_ready(struct pci_pwrctrl *pwrctrl);
+int devm_pci_pwrctrl_device_set_ready(struct device *dev,
+				      struct pci_pwrctrl *pwrctrl);
 
-#endif /* __PCI_PWRCTL_H__ */
+#endif /* __PCI_PWRCTRL_H__ */
diff --git a/include/linux/pci-tph.h b/include/linux/pci-tph.h
new file mode 100644
index 000000000000..c3e806c13d64
--- /dev/null
+++ b/include/linux/pci-tph.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * TPH (TLP Processing Hints)
+ *
+ * Copyright (C) 2024 Advanced Micro Devices, Inc.
+ *     Eric Van Tassell <Eric.VanTassell@amd.com>
+ *     Wei Huang <wei.huang2@amd.com>
+ */
+#ifndef LINUX_PCI_TPH_H
+#define LINUX_PCI_TPH_H
+
+/*
+ * According to the ECN for the PCI Firmware Spec, the Steering Tag can differ
+ * depending on the memory type: Volatile Memory or Persistent Memory. When a
+ * caller queries a target's Steering Tag, it must provide the target's
+ * tph_mem_type. ECN link: https://members.pcisig.com/wg/PCI-SIG/document/15470.
+ */ +enum tph_mem_type { + TPH_MEM_TYPE_VM, /* volatile memory */ + TPH_MEM_TYPE_PM /* persistent memory */ +}; + +#ifdef CONFIG_PCIE_TPH +int pcie_tph_set_st_entry(struct pci_dev *pdev, + unsigned int index, u16 tag); +int pcie_tph_get_cpu_st(struct pci_dev *dev, + enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *tag); +void pcie_disable_tph(struct pci_dev *pdev); +int pcie_enable_tph(struct pci_dev *pdev, int mode); +#else +static inline int pcie_tph_set_st_entry(struct pci_dev *pdev, + unsigned int index, u16 tag) +{ return -EINVAL; } +static inline int pcie_tph_get_cpu_st(struct pci_dev *dev, + enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *tag) +{ return -EINVAL; } +static inline void pcie_disable_tph(struct pci_dev *pdev) { } +static inline int pcie_enable_tph(struct pci_dev *pdev, int mode) +{ return -EINVAL; } +#endif + +#endif /* LINUX_PCI_TPH_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 573b4c4c2be6..db9b47ce3eef 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -313,12 +313,20 @@ struct pci_vpd { }; struct irq_affinity; +struct pcie_bwctrl_data; struct pcie_link_state; struct pci_sriov; struct pci_p2pdma; struct rcec_ea; -/* The pci_dev structure describes PCI devices */ +/* struct pci_dev - describes a PCI device + * + * @supported_speeds: PCIe Supported Link Speeds Vector (+ reserved 0 at + * LSB). 0 when the supported speeds cannot be + * determined (e.g., for Root Complex Integrated + * Endpoints without the relevant Capability + * Registers). + */ struct pci_dev { struct list_head bus_list; /* Node in per-bus list */ struct pci_bus *bus; /* Bus this device is on */ @@ -434,6 +442,7 @@ struct pci_dev { unsigned int ats_enabled:1; /* Address Translation Svc */ unsigned int pasid_enabled:1; /* Process Address Space ID */ unsigned int pri_enabled:1; /* Page Request Interface */ + unsigned int tph_enabled:1; /* TLP Processing Hints */ unsigned int is_managed:1; /* Managed via devres */ unsigned int is_msi_managed:1; /* MSI release via devres installed */ unsigned int needs_freset:1; /* Requires fundamental reset */ @@ -495,6 +504,7 @@ struct pci_dev { unsigned int dpc_rp_extensions:1; u8 dpc_rp_log_size; #endif + struct pcie_bwctrl_data *link_bwctrl; #ifdef CONFIG_PCI_ATS union { struct pci_sriov *sriov; /* PF: SR-IOV info */ @@ -522,6 +532,7 @@ struct pci_dev { struct npem *npem; /* Native PCIe Enclosure Management */ #endif u16 acs_cap; /* ACS Capability offset */ + u8 supported_speeds; /* Supported Link Speeds Vector */ phys_addr_t rom; /* Physical address if not from BAR */ size_t romlen; /* Length if not from BAR */ /* @@ -534,6 +545,12 @@ struct pci_dev { /* These methods index pci_reset_fn_methods[] */ u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */ + +#ifdef CONFIG_PCIE_TPH + u16 tph_cap; /* TPH capability offset */ + u8 tph_mode; /* TPH mode */ + u8 tph_req_type; /* TPH requester type */ +#endif }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -626,27 +643,6 @@ void pci_set_host_bridge_release(struct pci_host_bridge *bridge, int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); -/* - * The first PCI_BRIDGE_RESOURCE_NUM PCI bus resources (those that correspond - * to P2P or CardBus bridge windows) go in a table. Additional ones (for - * buses below host bridges or subtractive decode bridges) go in the list. - * Use pci_bus_for_each_resource() to iterate through all the resources. 
- */ - -/* - * PCI_SUBTRACTIVE_DECODE means the bridge forwards the window implicitly - * and there's no way to program the bridge with the details of the window. - * This does not apply to ACPI _CRS windows, even with the _DEC subtractive- - * decode bit set, because they are explicit and can be programmed with _SRS. - */ -#define PCI_SUBTRACTIVE_DECODE 0x1 - -struct pci_bus_resource { - struct list_head list; - struct resource *res; - unsigned int flags; -}; - #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ struct pci_bus { @@ -1274,6 +1270,7 @@ static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev, { switch (pos) { case PCI_EXP_LNKCTL: + case PCI_EXP_LNKCTL2: case PCI_EXP_RTCTL: return pcie_capability_clear_and_set_word_locked(dev, pos, clear, set); @@ -1498,8 +1495,7 @@ void pci_add_resource(struct list_head *resources, struct resource *res); void pci_add_resource_offset(struct list_head *resources, struct resource *res, resource_size_t offset); void pci_free_resource_list(struct list_head *resources); -void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, - unsigned int flags); +void pci_bus_add_resource(struct pci_bus *bus, struct resource *res); struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); void pci_bus_remove_resources(struct pci_bus *bus); void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res); @@ -1556,7 +1552,7 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus, void *alignf_data); -int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr, +int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr, resource_size_t size); unsigned long pci_address_to_pio(phys_addr_t addr); phys_addr_t pci_pio_to_address(unsigned long pio); @@ -1623,8 +1619,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, void pci_walk_bus(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), void *userdata); -void pci_walk_bus_locked(struct pci_bus *top, int (*cb)(struct pci_dev *, void *), - void *userdata); int pci_cfg_space_size(struct pci_dev *dev); unsigned char pci_bus_max_busnr(struct pci_bus *bus); void pci_setup_bridge(struct pci_bus *bus); @@ -1786,9 +1780,19 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d, #ifdef CONFIG_PCIEPORTBUS extern bool pcie_ports_disabled; extern bool pcie_ports_native; + +int pcie_set_target_speed(struct pci_dev *port, enum pci_bus_speed speed_req, + bool use_lt); #else #define pcie_ports_disabled true #define pcie_ports_native false + +static inline int pcie_set_target_speed(struct pci_dev *port, + enum pci_bus_speed speed_req, + bool use_lt) +{ + return -EOPNOTSUPP; +} #endif #define PCIE_LINK_STATE_L0S (BIT(0) | BIT(1)) /* Upstr/dwnstr L0s */ @@ -2019,7 +2023,7 @@ static inline int pci_request_regions(struct pci_dev *dev, const char *res_name) { return -EIO; } static inline void pci_release_regions(struct pci_dev *dev) { } -static inline int pci_register_io_range(struct fwnode_handle *fwnode, +static inline int pci_register_io_range(const struct fwnode_handle *fwnode, phys_addr_t addr, resource_size_t size) { return -EINVAL; } @@ -2293,15 +2297,15 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) { } #endif +int pcim_request_all_regions(struct pci_dev *pdev, const char *name); void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, const 
char *name); +void pcim_iounmap_region(struct pci_dev *pdev, int bar); void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); int pcim_request_region(struct pci_dev *pdev, int bar, const char *name); int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name); -int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, - const char *name); void pcim_iounmap_regions(struct pci_dev *pdev, int mask); void __iomem *pcim_iomap_range(struct pci_dev *pdev, int bar, unsigned long offset, unsigned long len); @@ -2609,6 +2613,12 @@ pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; } static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; } #endif +#if defined(CONFIG_X86) && defined(CONFIG_ACPI) +bool arch_pci_dev_is_removable(struct pci_dev *pdev); +#else +static inline bool arch_pci_dev_is_removable(struct pci_dev *pdev) { return false; } +#endif + #ifdef CONFIG_EEH static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev) { diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4cf6aaed5f35..d2402bf4aea2 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -121,6 +121,7 @@ #define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310 #define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320 #define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330 +#define PCI_CLASS_SERIAL_USB_CDNS 0x0c0380 #define PCI_CLASS_SERIAL_USB_DEVICE 0x0c03fe #define PCI_CLASS_SERIAL_FIBER 0x0c04 #define PCI_CLASS_SERIAL_SMBUS 0x0c05 @@ -2421,6 +2422,9 @@ #define PCI_VENDOR_ID_QCOM 0x17cb #define PCI_VENDOR_ID_CDNS 0x17cd +#define PCI_DEVICE_ID_CDNS_USBSS 0x0100 +#define PCI_DEVICE_ID_CDNS_USB 0x0120 +#define PCI_DEVICE_ID_CDNS_USBSSP 0x0200 #define PCI_VENDOR_ID_ARECA 0x17d3 #define PCI_DEVICE_ID_ARECA_1110 0x1110 @@ -2709,9 +2713,6 @@ #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 #define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a -#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb -#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212 -#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 #define PCI_DEVICE_ID_INTEL_82437 0x122d #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index b4a4eb6c8866..b5b5d17998b8 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -21,8 +21,6 @@ #define DW_AN_C37_1000BASEX 4 #define DW_10GBASER 5 -struct dw_xpcs_desc; - enum dw_xpcs_pcs_id { DW_XPCS_ID_NATIVE = 0, NXP_SJA1105_XPCS_ID = 0x00000010, @@ -48,33 +46,18 @@ struct dw_xpcs_info { u32 pma; }; -enum dw_xpcs_clock { - DW_XPCS_CORE_CLK, - DW_XPCS_PAD_CLK, - DW_XPCS_NUM_CLKS, -}; - -struct dw_xpcs { - struct dw_xpcs_info info; - const struct dw_xpcs_desc *desc; - struct mdio_device *mdiodev; - struct clk_bulk_data clks[DW_XPCS_NUM_CLKS]; - struct phylink_pcs pcs; - phy_interface_t interface; -}; +struct dw_xpcs; +struct phylink_pcs *xpcs_to_phylink_pcs(struct dw_xpcs *xpcs); int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); -void xpcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode, - phy_interface_t interface, int speed, int duplex); -int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface, - const unsigned long *advertising, unsigned int neg_mode); void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces); int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); -struct dw_xpcs 
*xpcs_create_mdiodev(struct mii_bus *bus, int addr, - phy_interface_t interface); -struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode, - phy_interface_t interface); +struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr); +struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode); void xpcs_destroy(struct dw_xpcs *xpcs); +struct phylink_pcs *xpcs_create_pcs_mdiodev(struct mii_bus *bus, int addr); +void xpcs_destroy_pcs(struct phylink_pcs *pcs); + #endif /* __LINUX_PCS_XPCS_H */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 8efce7414fad..35842d1e3879 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -220,15 +220,20 @@ do { \ (void)__vpp_verify; \ } while (0) +#define PERCPU_PTR(__p) \ +({ \ + unsigned long __pcpu_ptr = (__force unsigned long)(__p); \ + (typeof(*(__p)) __force __kernel *)(__pcpu_ptr); \ +}) + #ifdef CONFIG_SMP /* - * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE() - * to prevent the compiler from making incorrect assumptions about the - * pointer value. The weird cast keeps both GCC and sparse happy. + * Add an offset to a pointer. Use RELOC_HIDE() to prevent the compiler + * from making incorrect assumptions about the pointer value. */ #define SHIFT_PERCPU_PTR(__p, __offset) \ - RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)) + RELOC_HIDE(PERCPU_PTR(__p), (__offset)) #define per_cpu_ptr(ptr, cpu) \ ({ \ @@ -254,13 +259,13 @@ do { \ #else /* CONFIG_SMP */ -#define VERIFY_PERCPU_PTR(__p) \ +#define per_cpu_ptr(ptr, cpu) \ ({ \ - __verify_pcpu_ptr(__p); \ - (typeof(*(__p)) __kernel __force *)(__p); \ + (void)(cpu); \ + __verify_pcpu_ptr(ptr); \ + PERCPU_PTR(ptr); \ }) -#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) #define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) #define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 3372c1b56486..d698efba28a2 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -257,6 +257,7 @@ #define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ #define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ #define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ +#define ARMV8_PMU_USERENR_UEN (1 << 4) /* Fine grained per counter access at EL0 */ /* Mask for writable bits */ #define ARMV8_PMU_USERENR_MASK (ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \ ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index fb908843f209..cb99ec8c9e96 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -170,6 +170,12 @@ struct hw_perf_event { }; struct { /* aux / Intel-PT */ u64 aux_config; + /* + * For AUX area events, aux_paused cannot be a state + * flag because it can be updated asynchronously to + * state. 
+ */ + unsigned int aux_paused; }; struct { /* software */ struct hrtimer hrtimer; @@ -294,6 +300,7 @@ struct perf_event_pmu_context; #define PERF_PMU_CAP_NO_EXCLUDE 0x0040 #define PERF_PMU_CAP_AUX_OUTPUT 0x0080 #define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 +#define PERF_PMU_CAP_AUX_PAUSE 0x0200 /** * pmu::scope @@ -384,6 +391,8 @@ struct pmu { #define PERF_EF_START 0x01 /* start the counter when adding */ #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ +#define PERF_EF_PAUSE 0x08 /* AUX area event, pause tracing */ +#define PERF_EF_RESUME 0x10 /* AUX area event, resume tracing */ /* * Adds/Removes a counter to/from the PMU, can be done inside a @@ -423,6 +432,18 @@ struct pmu { * * ->start() with PERF_EF_RELOAD will reprogram the counter * value, must be preceded by a ->stop() with PERF_EF_UPDATE. + * + * ->stop() with PERF_EF_PAUSE will stop as simply as possible. Will not + * overlap another ->stop() with PERF_EF_PAUSE nor ->start() with + * PERF_EF_RESUME. + * + * ->start() with PERF_EF_RESUME will start as simply as possible but + * only if the counter is not otherwise stopped. Will not overlap + * another ->start() with PERF_EF_RESUME nor ->stop() with + * PERF_EF_PAUSE. + * + * Notably, PERF_EF_PAUSE/PERF_EF_RESUME *can* be concurrent with other + * ->stop()/->start() invocations, just not itself. */ void (*start) (struct perf_event *event, int flags); void (*stop) (struct perf_event *event, int flags); @@ -1655,15 +1676,35 @@ extern void perf_tp_event(u16 event_type, u64 count, void *record, struct task_struct *task); extern void perf_bp_event(struct perf_event *event, void *data); -#ifndef perf_misc_flags -# define perf_misc_flags(regs) \ +extern unsigned long perf_misc_flags(struct perf_event *event, struct pt_regs *regs); +extern unsigned long perf_instruction_pointer(struct perf_event *event, + struct pt_regs *regs); + +#ifndef perf_arch_misc_flags +# define perf_arch_misc_flags(regs) \ (user_mode(regs) ? 
PERF_RECORD_MISC_USER : PERF_RECORD_MISC_KERNEL) -# define perf_instruction_pointer(regs) instruction_pointer(regs) +# define perf_arch_instruction_pointer(regs) instruction_pointer(regs) #endif #ifndef perf_arch_bpf_user_pt_regs # define perf_arch_bpf_user_pt_regs(regs) regs #endif +#ifndef perf_arch_guest_misc_flags +static inline unsigned long perf_arch_guest_misc_flags(struct pt_regs *regs) +{ + unsigned long guest_state = perf_guest_state(); + + if (!(guest_state & PERF_GUEST_ACTIVE)) + return 0; + + if (guest_state & PERF_GUEST_USER) + return PERF_RECORD_MISC_GUEST_USER; + else + return PERF_RECORD_MISC_GUEST_KERNEL; +} +# define perf_arch_guest_misc_flags(regs) perf_arch_guest_misc_flags(regs) +#endif + static inline bool has_branch_stack(struct perf_event *event) { return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; @@ -1679,6 +1720,13 @@ static inline bool has_aux(struct perf_event *event) return event->pmu->setup_aux; } +static inline bool has_aux_action(struct perf_event *event) +{ + return event->attr.aux_sample_size || + event->attr.aux_pause || + event->attr.aux_resume; +} + static inline bool is_write_backward(struct perf_event *event) { return !!event->attr.write_backward; diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 59a3deb792a8..0e43ab653ab6 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -12,45 +12,166 @@ #include <linux/page_ext.h> extern struct page_ext_operations page_alloc_tagging_ops; +extern unsigned long alloc_tag_ref_mask; +extern int alloc_tag_ref_offs; +extern struct alloc_tag_kernel_section kernel_tags; -static inline union codetag_ref *codetag_ref_from_page_ext(struct page_ext *page_ext) +DECLARE_STATIC_KEY_FALSE(mem_profiling_compressed); + +typedef u16 pgalloc_tag_idx; + +union pgtag_ref_handle { + union codetag_ref *ref; /* reference in page extension */ + struct page *page; /* reference in page flags */ +}; + +/* Reserved indexes */ +#define CODETAG_ID_NULL 0 +#define CODETAG_ID_EMPTY 1 +#define CODETAG_ID_FIRST 2 + +#ifdef CONFIG_MODULES + +extern struct alloc_tag_module_section module_tags; + +static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx) { - return (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops); + return &module_tags.first_tag[idx - kernel_tags.count]; } -static inline struct page_ext *page_ext_from_codetag_ref(union codetag_ref *ref) +static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag) { - return (void *)ref - page_alloc_tagging_ops.offset; + return CODETAG_ID_FIRST + kernel_tags.count + (tag - module_tags.first_tag); } -/* Should be called only if mem_alloc_profiling_enabled() */ -static inline union codetag_ref *get_page_tag_ref(struct page *page) +#else /* CONFIG_MODULES */ + +static inline struct alloc_tag *module_idx_to_tag(pgalloc_tag_idx idx) { - if (page) { - struct page_ext *page_ext = page_ext_get(page); + pr_warn("invalid page tag reference %lu\n", (unsigned long)idx); + return NULL; +} - if (page_ext) - return codetag_ref_from_page_ext(page_ext); +static inline pgalloc_tag_idx module_tag_to_idx(struct alloc_tag *tag) +{ + pr_warn("invalid page tag 0x%lx\n", (unsigned long)tag); + return CODETAG_ID_NULL; +} + +#endif /* CONFIG_MODULES */ + +static inline void idx_to_ref(pgalloc_tag_idx idx, union codetag_ref *ref) +{ + switch (idx) { + case (CODETAG_ID_NULL): + ref->ct = NULL; + break; + case (CODETAG_ID_EMPTY): + set_codetag_empty(ref); + break; + default: + idx -= CODETAG_ID_FIRST; + ref->ct = idx < 
kernel_tags.count ? + &kernel_tags.first_tag[idx].ct : + &module_idx_to_tag(idx)->ct; + break; } - return NULL; } -static inline void put_page_tag_ref(union codetag_ref *ref) +static inline pgalloc_tag_idx ref_to_idx(union codetag_ref *ref) +{ + struct alloc_tag *tag; + + if (!ref->ct) + return CODETAG_ID_NULL; + + if (is_codetag_empty(ref)) + return CODETAG_ID_EMPTY; + + tag = ct_to_alloc_tag(ref->ct); + if (tag >= kernel_tags.first_tag && tag < kernel_tags.first_tag + kernel_tags.count) + return CODETAG_ID_FIRST + (tag - kernel_tags.first_tag); + + return module_tag_to_idx(tag); +} + + + +/* Should be called only if mem_alloc_profiling_enabled() */ +static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref, + union pgtag_ref_handle *handle) { - if (WARN_ON(!ref)) + if (!page) + return false; + + if (static_key_enabled(&mem_profiling_compressed)) { + pgalloc_tag_idx idx; + + idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask; + idx_to_ref(idx, ref); + handle->page = page; + } else { + struct page_ext *page_ext; + union codetag_ref *tmp; + + page_ext = page_ext_get(page); + if (!page_ext) + return false; + + tmp = (union codetag_ref *)page_ext_data(page_ext, &page_alloc_tagging_ops); + ref->ct = tmp->ct; + handle->ref = tmp; + } + + return true; +} + +static inline void put_page_tag_ref(union pgtag_ref_handle handle) +{ + if (WARN_ON(!handle.ref)) return; - page_ext_put(page_ext_from_codetag_ref(ref)); + if (!static_key_enabled(&mem_profiling_compressed)) + page_ext_put((void *)handle.ref - page_alloc_tagging_ops.offset); +} + +static inline void update_page_tag_ref(union pgtag_ref_handle handle, union codetag_ref *ref) +{ + if (static_key_enabled(&mem_profiling_compressed)) { + struct page *page = handle.page; + unsigned long old_flags; + unsigned long flags; + unsigned long idx; + + if (WARN_ON(!page || !ref)) + return; + + idx = (unsigned long)ref_to_idx(ref); + idx = (idx & alloc_tag_ref_mask) << alloc_tag_ref_offs; + do { + old_flags = READ_ONCE(page->flags); + flags = old_flags; + flags &= ~(alloc_tag_ref_mask << alloc_tag_ref_offs); + flags |= idx; + } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); + } else { + if (WARN_ON(!handle.ref || !ref)) + return; + + handle.ref->ct = ref->ct; + } } static inline void clear_page_tag_ref(struct page *page) { if (mem_alloc_profiling_enabled()) { - union codetag_ref *ref = get_page_tag_ref(page); + union pgtag_ref_handle handle; + union codetag_ref ref; - if (ref) { - set_codetag_empty(ref); - put_page_tag_ref(ref); + if (get_page_tag_ref(page, &ref, &handle)) { + set_codetag_empty(&ref); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); } } } @@ -59,11 +180,13 @@ static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) { if (mem_alloc_profiling_enabled()) { - union codetag_ref *ref = get_page_tag_ref(page); + union pgtag_ref_handle handle; + union codetag_ref ref; - if (ref) { - alloc_tag_add(ref, task->alloc_tag, PAGE_SIZE * nr); - put_page_tag_ref(ref); + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_add(&ref, task->alloc_tag, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); } } } @@ -71,11 +194,13 @@ static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) { if (mem_alloc_profiling_enabled()) { - union codetag_ref *ref = get_page_tag_ref(page); + union pgtag_ref_handle handle; + union codetag_ref ref; - 
if (ref) { - alloc_tag_sub(ref, PAGE_SIZE * nr); - put_page_tag_ref(ref); + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub(&ref, PAGE_SIZE * nr); + update_page_tag_ref(handle, &ref); + put_page_tag_ref(handle); } } } @@ -85,13 +210,14 @@ static inline struct alloc_tag *pgalloc_tag_get(struct page *page) struct alloc_tag *tag = NULL; if (mem_alloc_profiling_enabled()) { - union codetag_ref *ref = get_page_tag_ref(page); - - alloc_tag_sub_check(ref); - if (ref) { - if (ref->ct) - tag = ct_to_alloc_tag(ref->ct); - put_page_tag_ref(ref); + union pgtag_ref_handle handle; + union codetag_ref ref; + + if (get_page_tag_ref(page, &ref, &handle)) { + alloc_tag_sub_check(&ref); + if (ref.ct) + tag = ct_to_alloc_tag(ref.ct); + put_page_tag_ref(handle); } } @@ -104,16 +230,22 @@ static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) this_cpu_sub(tag->counters->bytes, PAGE_SIZE * nr); } +void pgalloc_tag_split(struct folio *folio, int old_order, int new_order); +void pgalloc_tag_copy(struct folio *new, struct folio *old); + +void __init alloc_tag_sec_init(void); + #else /* CONFIG_MEM_ALLOC_PROFILING */ -static inline union codetag_ref *get_page_tag_ref(struct page *page) { return NULL; } -static inline void put_page_tag_ref(union codetag_ref *ref) {} static inline void clear_page_tag_ref(struct page *page) {} static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} +static inline void alloc_tag_sec_init(void) {} +static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) {} +static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) {} #endif /* CONFIG_MEM_ALLOC_PROFILING */ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index e8b2ac6bd2ae..adef9d6e9b1b 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -90,6 +90,27 @@ static inline unsigned long pud_index(unsigned long address) #define pgd_index(a) (((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) #endif +#ifndef kernel_pte_init +static inline void kernel_pte_init(void *addr) +{ +} +#define kernel_pte_init kernel_pte_init +#endif + +#ifndef pmd_init +static inline void pmd_init(void *addr) +{ +} +#define pmd_init pmd_init +#endif + +#ifndef pud_init +static inline void pud_init(void *addr) +{ +} +#define pud_init pud_init +#endif + #ifndef pte_offset_kernel static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) { @@ -1056,44 +1077,6 @@ static inline int pgd_same(pgd_t pgd_a, pgd_t pgd_b) } #endif -/* - * Use set_p*_safe(), and elide TLB flushing, when confident that *no* - * TLB flush will be required as a result of the "set". For example, use - * in scenarios where it is known ahead of time that the routine is - * setting non-present entries, or re-setting an existing entry to the - * same value. Otherwise, use the typical "set" helpers and flush the - * TLB. 
- */ -#define set_pte_safe(ptep, pte) \ -({ \ - WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \ - set_pte(ptep, pte); \ -}) - -#define set_pmd_safe(pmdp, pmd) \ -({ \ - WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \ - set_pmd(pmdp, pmd); \ -}) - -#define set_pud_safe(pudp, pud) \ -({ \ - WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \ - set_pud(pudp, pud); \ -}) - -#define set_p4d_safe(p4dp, p4d) \ -({ \ - WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \ - set_p4d(p4dp, p4d); \ -}) - -#define set_pgd_safe(pgdp, pgd) \ -({ \ - WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \ - set_pgd(pgdp, pgd); \ -}) - #ifndef __HAVE_ARCH_DO_SWAP_PAGE static inline void arch_do_swap_page_nr(struct mm_struct *mm, struct vm_area_struct *vma, diff --git a/include/linux/phy.h b/include/linux/phy.h index a98bc91a0cde..563c46205685 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -601,8 +601,8 @@ struct macsec_ops; * @adv_old: Saved advertised while power saving for WoL * @supported_eee: supported PHY EEE linkmodes * @advertising_eee: Currently advertised EEE linkmodes - * @eee_enabled: Flag indicating whether the EEE feature is enabled * @enable_tx_lpi: When True, MAC should transmit LPI to PHY + * @eee_active: phylib private state, indicating that EEE has been negotiated * @eee_cfg: User configuration of EEE * @lp_advertising: Current link partner advertised linkmodes * @host_interfaces: PHY interface modes supported by host @@ -721,16 +721,15 @@ struct phy_device { /* used for eee validation and configuration*/ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_eee); __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising_eee); - bool eee_enabled; - - /* Host supported PHY interface types. Should be ignored if empty. */ - DECLARE_PHY_INTERFACE_MASK(host_interfaces); - /* Energy efficient ethernet modes which should be prohibited */ - u32 eee_broken_modes; + __ETHTOOL_DECLARE_LINK_MODE_MASK(eee_broken_modes); bool enable_tx_lpi; + bool eee_active; struct eee_config eee_cfg; + /* Host supported PHY interface types. Should be ignored if empty. */ + DECLARE_PHY_INTERFACE_MASK(host_interfaces); + #ifdef CONFIG_LED_TRIGGER_PHY struct phy_led_trigger *phy_led_triggers; unsigned int phy_num_led_triggers; @@ -877,8 +876,9 @@ struct phy_plca_status { /* Modes for PHY LED configuration */ enum phy_led_modes { - PHY_LED_ACTIVE_LOW = 0, - PHY_LED_INACTIVE_HIGH_IMPEDANCE = 1, + PHY_LED_ACTIVE_HIGH = 0, + PHY_LED_ACTIVE_LOW = 1, + PHY_LED_INACTIVE_HIGH_IMPEDANCE = 2, /* keep it last */ __PHY_LED_MODES_NUM, @@ -1260,9 +1260,20 @@ size_t phy_speeds(unsigned int *speeds, size_t size, unsigned long *mask); void of_set_phy_supported(struct phy_device *phydev); void of_set_phy_eee_broken(struct phy_device *phydev); +void of_set_phy_timing_role(struct phy_device *phydev); int phy_speed_down_core(struct phy_device *phydev); /** + * phy_set_eee_broken - Mark an EEE mode as broken so that it isn't advertised. 
+ * @phydev: The phy_device struct + * @link_mode: The broken EEE mode + */ +static inline void phy_set_eee_broken(struct phy_device *phydev, u32 link_mode) +{ + linkmode_set_bit(link_mode, phydev->eee_broken_modes); +} + +/** * phy_is_started - Convenience function to check whether PHY is started * @phydev: The phy_device struct */ @@ -1378,12 +1389,13 @@ int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); * @regnum: The register on the MMD to read * @val: Variable to read the register into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * @sleep_before_read: if it is true, sleep @sleep_us before read. - * Returns 0 on success and -ETIMEDOUT upon a timeout. In either + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout. In either * case, the last read value at @args is stored in @val. Must not * be called from atomic context if sleep_us or timeout_us are used. */ @@ -1883,7 +1895,6 @@ int genphy_read_abilities(struct phy_device *phydev); int genphy_setup_forced(struct phy_device *phydev); int genphy_restart_aneg(struct phy_device *phydev); int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart); -int genphy_config_eee_advert(struct phy_device *phydev); int __genphy_config_aneg(struct phy_device *phydev, bool changed); int genphy_aneg_done(struct phy_device *phydev); int genphy_update_link(struct phy_device *phydev); @@ -1951,7 +1962,6 @@ int genphy_c45_ethtool_get_eee(struct phy_device *phydev, struct ethtool_keee *data); int genphy_c45_ethtool_set_eee(struct phy_device *phydev, struct ethtool_keee *data); -int genphy_c45_write_eee_adv(struct phy_device *phydev, unsigned long *adv); int genphy_c45_an_config_eee_aneg(struct phy_device *phydev); int genphy_c45_read_eee_adv(struct phy_device *phydev, unsigned long *adv); diff --git a/include/linux/phy/phy-sun4i-usb.h b/include/linux/phy/phy-sun4i-usb.h index 91eb755ee73b..f3e7b13608e4 100644 --- a/include/linux/phy/phy-sun4i-usb.h +++ b/include/linux/phy/phy-sun4i-usb.h @@ -11,7 +11,7 @@ /** * sun4i_usb_phy_set_squelch_detect() - Enable/disable squelch detect * @phy: reference to a sun4i usb phy - * @enabled: wether to enable or disable squelch detect + * @enabled: whether to enable or disable squelch detect */ void sun4i_usb_phy_set_squelch_detect(struct phy *phy, bool enabled); diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index f9c00f839e9f..085dd8e8af76 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -13,8 +13,6 @@ #include <linux/dmaengine.h> -extern void s3c64xx_ac97_setup_gpio(int); - struct samsung_i2s_type { /* If the Primary DAI has 5.1 Channels */ #define QUIRK_PRI_6CHAN (1 << 0) diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index e574b790be6f..b3c4993e656e 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -1312,6 +1312,38 @@ enum ec_feature_code { * The EC supports the AP composing VDMs for us to send. 
*/ EC_FEATURE_TYPEC_AP_VDM_SEND = 46, + /* + * The EC supports system safe mode panic recovery. + */ + EC_FEATURE_SYSTEM_SAFE_MODE = 47, + /* + * The EC will reboot on runtime assertion failures. + */ + EC_FEATURE_ASSERT_REBOOTS = 48, + /* + * The EC image is built with tokenized logging enabled. + */ + EC_FEATURE_TOKENIZED_LOGGING = 49, + /* + * The EC supports triggering an STB dump. + */ + EC_FEATURE_AMD_STB_DUMP = 50, + /* + * The EC supports memory dump commands. + */ + EC_FEATURE_MEMORY_DUMP = 51, + /* + * The EC supports DP2.1 capability + */ + EC_FEATURE_TYPEC_DP2_1 = 52, + /* + * The MCU is System Companion Processor Core 1 + */ + EC_FEATURE_SCP_C1 = 53, + /* + * The EC supports UCSI PPM. + */ + EC_FEATURE_UCSI_PPM = 54, }; #define EC_FEATURE_MASK_0(event_code) BIT(event_code % 32) diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h index 1707ad4147df..7d21e0c41037 100644 --- a/include/linux/platform_data/hwmon-s3c.h +++ b/include/linux/platform_data/hwmon-s3c.h @@ -33,14 +33,4 @@ struct s3c_hwmon_pdata { struct s3c_hwmon_chcfg *in[8]; }; -/** - * s3c_hwmon_set_platdata - Set platform data for S3C HWMON device - * @pd: Platform data to register to device. - * - * Register the given platform data for use with the S3C HWMON device. - * The call will copy the platform data, so the board definitions can - * make the structure itself __initdata. - */ -extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd); - #endif /* __HWMON_S3C_H__ */ diff --git a/include/linux/platform_data/max6639.h b/include/linux/platform_data/max6639.h deleted file mode 100644 index 65bfdb4fdc15..000000000000 --- a/include/linux/platform_data/max6639.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_MAX6639_H -#define _LINUX_MAX6639_H - -#include <linux/types.h> - -/* platform data for the MAX6639 temperature sensor and fan control */ - -struct max6639_platform_data { - bool pwm_polarity; /* Polarity low (0) or high (1, default) */ - int ppr; /* Pulses per rotation 1..4 (default == 2) */ - int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */ -}; - -#endif /* _LINUX_MAX6639_H */ diff --git a/include/linux/platform_data/media/omap4iss.h b/include/linux/platform_data/media/omap4iss.h deleted file mode 100644 index 2a511a8fcda7..000000000000 --- a/include/linux/platform_data/media/omap4iss.h +++ /dev/null @@ -1,66 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ARCH_ARM_PLAT_OMAP4_ISS_H -#define ARCH_ARM_PLAT_OMAP4_ISS_H - -#include <linux/i2c.h> - -struct iss_device; - -enum iss_interface_type { - ISS_INTERFACE_CSI2A_PHY1, - ISS_INTERFACE_CSI2B_PHY2, -}; - -/** - * struct iss_csiphy_lane: CSI2 lane position and polarity - * @pos: position of the lane - * @pol: polarity of the lane - */ -struct iss_csiphy_lane { - u8 pos; - u8 pol; -}; - -#define ISS_CSIPHY1_NUM_DATA_LANES 4 -#define ISS_CSIPHY2_NUM_DATA_LANES 1 - -/** - * struct iss_csiphy_lanes_cfg - CSI2 lane configuration - * @data: Configuration of one or two data lanes - * @clk: Clock lane configuration - */ -struct iss_csiphy_lanes_cfg { - struct iss_csiphy_lane data[ISS_CSIPHY1_NUM_DATA_LANES]; - struct iss_csiphy_lane clk; -}; - -/** - * struct iss_csi2_platform_data - CSI2 interface platform data - * @crc: Enable the cyclic redundancy check - * @vpclk_div: Video port output clock control - */ -struct iss_csi2_platform_data { - unsigned crc:1; - unsigned vpclk_div:2; - struct iss_csiphy_lanes_cfg lanecfg; -}; - -struct iss_subdev_i2c_board_info { - 
struct i2c_board_info *board_info; - int i2c_adapter_id; -}; - -struct iss_v4l2_subdevs_group { - struct iss_subdev_i2c_board_info *subdevs; - enum iss_interface_type interface; - union { - struct iss_csi2_platform_data csi2; - } bus; /* gcc < 4.6.0 chokes on anonymous union initializers */ -}; - -struct iss_platform_data { - struct iss_v4l2_subdevs_group *subdevs; - void (*set_constraints)(struct iss_device *iss, bool enable); -}; - -#endif diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h index 2ee1a679e592..0e0e8fe6975f 100644 --- a/include/linux/platform_data/microchip-ksz.h +++ b/include/linux/platform_data/microchip-ksz.h @@ -42,6 +42,7 @@ enum ksz_chip_id { LAN9372_CHIP_ID = 0x00937200, LAN9373_CHIP_ID = 0x00937300, LAN9374_CHIP_ID = 0x00937400, + LAN9646_CHIP_ID = 0x00964600, }; struct ksz_platform_data { diff --git a/include/linux/platform_data/sa11x0-serial.h b/include/linux/platform_data/sa11x0-serial.h index 8b79ab08af45..a88096bc74e4 100644 --- a/include/linux/platform_data/sa11x0-serial.h +++ b/include/linux/platform_data/sa11x0-serial.h @@ -10,7 +10,6 @@ #define SA11X0_SERIAL_H struct uart_port; -struct uart_info; /* * This is a temporary structure for registering these diff --git a/include/linux/platform_data/x86/intel_scu_ipc.h b/include/linux/platform_data/x86/intel_scu_ipc.h index 0ca9962e97f2..b287627759f7 100644 --- a/include/linux/platform_data/x86/intel_scu_ipc.h +++ b/include/linux/platform_data/x86/intel_scu_ipc.h @@ -2,9 +2,13 @@ #ifndef __PLATFORM_X86_INTEL_SCU_IPC_H_ #define __PLATFORM_X86_INTEL_SCU_IPC_H_ +#include <linux/init.h> #include <linux/ioport.h> +#include <linux/types.h> struct device; +struct module; + struct intel_scu_ipc_dev; /** diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index cf4b11be3709..45646bfcaf1a 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -30,9 +30,16 @@ * supplier and its PM domain when creating the * device-links. * + * PD_FLAG_REQUIRED_OPP: Assign required_devs for the required OPPs. The + * index of the required OPP must correspond to the + * index in the array of the pd_names. If pd_names + * isn't specified, the index just follows the + * index for the attached PM domain. + * */ #define PD_FLAG_NO_DEV_LINK BIT(0) #define PD_FLAG_DEV_LINK_ON BIT(1) +#define PD_FLAG_REQUIRED_OPP BIT(2) struct dev_pm_domain_attach_data { const char * const *pd_names; @@ -43,6 +50,7 @@ struct dev_pm_domain_attach_data { struct dev_pm_domain_list { struct device **pd_devs; struct device_link **pd_links; + u32 *opp_tokens; u32 num_pds; }; @@ -250,6 +258,7 @@ struct generic_pm_domain_data { unsigned int performance_state; unsigned int default_pstate; unsigned int rpm_pstate; + unsigned int opp_token; bool hw_mode; void *data; }; diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 6424692c30b7..568183e3e641 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -62,11 +62,8 @@ typedef int (*config_clks_t)(struct device *dev, struct opp_table *opp_table, * @supported_hw: Array of hierarchy of versions to match. * @supported_hw_count: Number of elements in the array. * @regulator_names: Array of pointers to the names of the regulator, NULL terminated. - * @genpd_names: Null terminated array of pointers containing names of genpd to - * attach. Mutually exclusive with required_devs. - * @virt_devs: Pointer to return the array of genpd virtual devices. Mutually - * exclusive with required_devs. 
- * @required_devs: Required OPP devices. Mutually exclusive with genpd_names/virt_devs. + * @required_dev: The required OPP device. + * @required_dev_index: The index of the required OPP for the @required_dev. * * This structure contains platform specific OPP configurations for the device. */ @@ -79,9 +76,8 @@ struct dev_pm_opp_config { const unsigned int *supported_hw; unsigned int supported_hw_count; const char * const *regulator_names; - const char * const *genpd_names; - struct device ***virt_devs; - struct device **required_devs; + struct device *required_dev; + unsigned int required_dev_index; }; #define OPP_LEVEL_UNSET U32_MAX @@ -675,36 +671,6 @@ static inline void dev_pm_opp_put_config_regulators(int token) dev_pm_opp_clear_config(token); } -/* genpd helpers */ -static inline int dev_pm_opp_attach_genpd(struct device *dev, - const char * const *names, - struct device ***virt_devs) -{ - struct dev_pm_opp_config config = { - .genpd_names = names, - .virt_devs = virt_devs, - }; - - return dev_pm_opp_set_config(dev, &config); -} - -static inline void dev_pm_opp_detach_genpd(int token) -{ - dev_pm_opp_clear_config(token); -} - -static inline int devm_pm_opp_attach_genpd(struct device *dev, - const char * const *names, - struct device ***virt_devs) -{ - struct dev_pm_opp_config config = { - .genpd_names = names, - .virt_devs = virt_devs, - }; - - return devm_pm_opp_set_config(dev, &config); -} - /* prop-name helpers */ static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) { diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 76cd1f9f1365..222f7530806c 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h @@ -10,7 +10,7 @@ #define _LINUX_PM_WAKEUP_H #ifndef _DEVICE_H_ -# error "please don't include this file directly" +# error "Please do not include this file directly." 
#endif #include <linux/types.h> diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 453691710839..f11f10c97bd9 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -5,12 +5,16 @@ #include <linux/alarmtimer.h> #include <linux/list.h> #include <linux/mutex.h> +#include <linux/pid.h> #include <linux/posix-timers_types.h> +#include <linux/rcuref.h> #include <linux/spinlock.h> #include <linux/timerqueue.h> struct kernel_siginfo; struct task_struct; +struct sigqueue; +struct k_itimer; static inline clockid_t make_process_cpuclock(const unsigned int pid, const clockid_t clock) @@ -35,6 +39,8 @@ static inline int clockid_to_fd(const clockid_t clk) #ifdef CONFIG_POSIX_TIMERS +#include <linux/signal_types.h> + /** * cpu_timer - Posix CPU timer representation for k_itimer * @node: timerqueue node to queue in the task/sig @@ -42,6 +48,7 @@ static inline int clockid_to_fd(const clockid_t clk) * @pid: Pointer to target task PID * @elist: List head for the expiry list * @firing: Timer is currently firing + * @nanosleep: Timer is used for nanosleep and is not a regular posix-timer * @handling: Pointer to the task which handles expiry */ struct cpu_timer { @@ -49,7 +56,8 @@ struct cpu_timer { struct timerqueue_head *head; struct pid *pid; struct list_head elist; - int firing; + bool firing; + bool nanosleep; struct task_struct __rcu *handling; }; @@ -101,6 +109,12 @@ static inline void posix_cputimers_rt_watchdog(struct posix_cputimers *pct, pct->bases[CPUCLOCK_SCHED].nextevt = runtime; } +void posixtimer_rearm_itimer(struct task_struct *p); +bool posixtimer_init_sigqueue(struct sigqueue *q); +void posixtimer_send_sigqueue(struct k_itimer *tmr); +bool posixtimer_deliver_signal(struct kernel_siginfo *info, struct sigqueue *timer_sigq); +void posixtimer_free_timer(struct k_itimer *timer); + /* Init task static initializer */ #define INIT_CPU_TIMERBASE(b) { \ .nextevt = U64_MAX, \ @@ -122,6 +136,10 @@ struct cpu_timer { }; static inline void posix_cputimers_init(struct posix_cputimers *pct) { } static inline void posix_cputimers_group_init(struct posix_cputimers *pct, u64 cpu_limit) { } +static inline void posixtimer_rearm_itimer(struct task_struct *p) { } +static inline bool posixtimer_deliver_signal(struct kernel_siginfo *info, + struct sigqueue *timer_sigq) { return false; } +static inline void posixtimer_free_timer(struct k_itimer *timer) { } #endif #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK @@ -132,50 +150,56 @@ static inline void clear_posix_cputimers_work(struct task_struct *p) { } static inline void posix_cputimers_init_work(void) { } #endif -#define REQUEUE_PENDING 1 - /** * struct k_itimer - POSIX.1b interval timer structure. 
- * @list: List head for binding the timer to signals->posix_timers + * @list: List node for binding the timer to tsk::signal::posix_timers + * @ignored_list: List node for tracking ignored timers in tsk::signal::ignored_posix_timers * @t_hash: Entry in the posix timer hash table * @it_lock: Lock protecting the timer * @kclock: Pointer to the k_clock struct handling this timer * @it_clock: The posix timer clock id * @it_id: The posix timer id for identifying the timer - * @it_active: Marker that timer is active + * @it_status: The status of the timer + * @it_sig_periodic: The periodic status at signal delivery * @it_overrun: The overrun counter for pending signals * @it_overrun_last: The overrun at the time of the last delivered signal - * @it_requeue_pending: Indicator that timer waits for being requeued on - * signal delivery + * @it_signal_seq: Sequence count to control signal delivery + * @it_sigqueue_seq: The sequence count at the point where the signal was queued * @it_sigev_notify: The notify word of sigevent struct for signal delivery * @it_interval: The interval for periodic timers * @it_signal: Pointer to the creator's signal struct * @it_pid: The pid of the process/task targeted by the signal * @it_process: The task to wakeup on clock_nanosleep (CPU timers) - * @sigq: Pointer to preallocated sigqueue + * @rcuref: Reference count for lifetime management + * @sigq: Embedded sigqueue * @it: Union representing the various posix timer type * internals. * @rcu: RCU head for freeing the timer. */ struct k_itimer { struct hlist_node list; + struct hlist_node ignored_list; struct hlist_node t_hash; spinlock_t it_lock; const struct k_clock *kclock; clockid_t it_clock; timer_t it_id; - int it_active; + int it_status; + bool it_sig_periodic; s64 it_overrun; s64 it_overrun_last; - int it_requeue_pending; + unsigned int it_signal_seq; + unsigned int it_sigqueue_seq; int it_sigev_notify; + enum pid_type it_pid_type; ktime_t it_interval; struct signal_struct *it_signal; union { struct pid *it_pid; struct task_struct *it_process; }; - struct sigqueue *sigq; + struct sigqueue sigq; + rcuref_t rcuref; union { struct { struct hrtimer timer; @@ -196,5 +220,29 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx, int update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new); -void posixtimer_rearm(struct kernel_siginfo *info); +#ifdef CONFIG_POSIX_TIMERS +static inline void posixtimer_putref(struct k_itimer *tmr) +{ + if (rcuref_put(&tmr->rcuref)) + posixtimer_free_timer(tmr); +} + +static inline void posixtimer_sigqueue_getref(struct sigqueue *q) +{ + struct k_itimer *tmr = container_of(q, struct k_itimer, sigq); + + WARN_ON_ONCE(!rcuref_get(&tmr->rcuref)); +} + +static inline void posixtimer_sigqueue_putref(struct sigqueue *q) +{ + struct k_itimer *tmr = container_of(q, struct k_itimer, sigq); + + posixtimer_putref(tmr); +} +#else /* CONFIG_POSIX_TIMERS */ +static inline void posixtimer_sigqueue_getref(struct sigqueue *q) { } +static inline void posixtimer_sigqueue_putref(struct sigqueue *q) { } +#endif /* !CONFIG_POSIX_TIMERS */ + #endif diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 0e65b3d634d9..e2d47eb1a7f3 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -28,9 +28,9 @@ struct posix_acl_entry { struct posix_acl { refcount_t a_refcount; - struct rcu_head a_rcu; unsigned int a_count; - struct posix_acl_entry a_entries[]; + struct rcu_head a_rcu; + struct posix_acl_entry a_entries[] __counted_by(a_count); };
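Besides moving a_rcu below the counter, the posix_acl hunk above annotates the flexible array with __counted_by(a_count), which lets FORTIFY_SOURCE and the array-bounds sanitizer check indexing against a_count at runtime. That only works if allocations size the object from the same counter and set it before the array is touched; a generic sketch of the pattern (hedged, not the kernel's actual posix_acl_alloc()):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	static struct posix_acl *example_acl_alloc(unsigned int count, gfp_t flags)
	{
		/* struct_size() = sizeof(*acl) + count * sizeof(acl->a_entries[0]), overflow-checked */
		struct posix_acl *acl = kzalloc(struct_size(acl, a_entries, count), flags);

		if (acl)
			acl->a_count = count; /* must be valid before a_entries[] is indexed; refcount init omitted */
		return acl;
	}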
#define FOREACH_ACL_ENTRY(pa, acl, pe) \ @@ -62,7 +62,7 @@ posix_acl_release(struct posix_acl *acl) /* posix_acl.c */ extern void posix_acl_init(struct posix_acl *, int); -extern struct posix_acl *posix_acl_alloc(int, gfp_t); +extern struct posix_acl *posix_acl_alloc(unsigned int count, gfp_t flags); extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *); diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 910d407ebe63..b98106e1a90f 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -236,6 +236,8 @@ struct power_supply_config { char **supplied_to; size_t num_supplicants; + + bool no_wakeup_source; }; /* Description of power supply */ @@ -750,9 +752,9 @@ struct power_supply_battery_info { int temp_alert_max; int temp_min; int temp_max; - struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; + const struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX]; int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX]; - struct power_supply_resistance_temp_table *resist_table; + const struct power_supply_resistance_temp_table *resist_table; int resist_table_size; const struct power_supply_vbat_ri_table *vbat2ri_discharging; int vbat2ri_discharging_size; @@ -797,15 +799,15 @@ extern bool power_supply_battery_info_has_prop(struct power_supply_battery_info extern int power_supply_battery_info_get_prop(struct power_supply_battery_info *info, enum power_supply_property psp, union power_supply_propval *val); -extern int power_supply_ocv2cap_simple(struct power_supply_battery_ocv_table *table, +extern int power_supply_ocv2cap_simple(const struct power_supply_battery_ocv_table *table, int table_len, int ocv); -extern struct power_supply_battery_ocv_table * +extern const struct power_supply_battery_ocv_table * power_supply_find_ocv2cap_table(struct power_supply_battery_info *info, int temp, int *table_len); extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info, int ocv, int temp); extern int -power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table, +power_supply_temp2resist_simple(const struct power_supply_resistance_temp_table *table, int table_len, int temp); extern int power_supply_vbat2ri(struct power_supply_battery_info *info, int vbat_uv, bool charging); @@ -863,8 +865,6 @@ static inline int power_supply_set_property(struct power_supply *psy, const union power_supply_propval *val) { return 0; } #endif -extern int power_supply_property_is_writeable(struct power_supply *psy, - enum power_supply_property psp); extern void power_supply_external_power_changed(struct power_supply *psy); extern struct power_supply *__must_check @@ -872,17 +872,9 @@ power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); extern struct power_supply *__must_check -power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg); -extern struct power_supply *__must_check devm_power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg); -extern struct power_supply *__must_check -devm_power_supply_register_no_ws(struct device *parent, - const struct power_supply_desc *desc, - const struct power_supply_config *cfg); extern void power_supply_unregister(struct 
power_supply *psy); extern int power_supply_powers(struct power_supply *psy, struct device *dev); @@ -945,19 +937,6 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp) return false; } -#ifdef CONFIG_POWER_SUPPLY_HWMON -int power_supply_add_hwmon_sysfs(struct power_supply *psy); -void power_supply_remove_hwmon_sysfs(struct power_supply *psy); -#else -static inline int power_supply_add_hwmon_sysfs(struct power_supply *psy) -{ - return 0; -} - -static inline -void power_supply_remove_hwmon_sysfs(struct power_supply *psy) {} -#endif - #ifdef CONFIG_SYSFS ssize_t power_supply_charge_behaviour_show(struct device *dev, unsigned int available_behaviours, diff --git a/include/linux/prandom.h b/include/linux/prandom.h index f7f1e5251c67..f2ed5b72b3d6 100644 --- a/include/linux/prandom.h +++ b/include/linux/prandom.h @@ -10,6 +10,7 @@ #include <linux/types.h> #include <linux/once.h> +#include <linux/percpu.h> #include <linux/random.h> struct rnd_state { diff --git a/include/linux/preempt.h b/include/linux/preempt.h index ce76f1a45722..ca86235ac15c 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -486,6 +486,7 @@ DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable()) extern bool preempt_model_none(void); extern bool preempt_model_voluntary(void); extern bool preempt_model_full(void); +extern bool preempt_model_lazy(void); #else @@ -502,6 +503,11 @@ static inline bool preempt_model_full(void) return IS_ENABLED(CONFIG_PREEMPT); } +static inline bool preempt_model_lazy(void) +{ + return IS_ENABLED(CONFIG_PREEMPT_LAZY); +} + #endif static inline bool preempt_model_rt(void) @@ -519,7 +525,7 @@ static inline bool preempt_model_rt(void) */ static inline bool preempt_model_preemptible(void) { - return preempt_model_full() || preempt_model_rt(); + return preempt_model_full() || preempt_model_lazy() || preempt_model_rt(); } #endif /* __LINUX_PREEMPT_H */ diff --git a/include/linux/printk.h b/include/linux/printk.h index eca9bb2ee637..4217a9f412b2 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -166,6 +166,9 @@ __printf(1, 2) __cold int _printk_deferred(const char *fmt, ...); extern void __printk_deferred_enter(void); extern void __printk_deferred_exit(void); +extern void printk_force_console_enter(void); +extern void printk_force_console_exit(void); + /* * The printk_deferred_enter/exit macros are available only as a hack for * some code paths that need to defer all printk console printing. Interrupts @@ -229,6 +232,14 @@ static inline void printk_deferred_exit(void) { } +static inline void printk_force_console_enter(void) +{ +} + +static inline void printk_force_console_exit(void) +{ +} + static inline int printk_ratelimit(void) { return 0; diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 8acd60b53f58..78827f312407 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -49,6 +49,31 @@ enum { PWMF_EXPORTED = 1, }; +/** + * struct pwm_waveform - description of a PWM waveform + * @period_length_ns: PWM period + * @duty_length_ns: PWM duty cycle + * @duty_offset_ns: offset of the rising edge from the period's start + * + * This is a representation of a PWM waveform alternative to struct pwm_state + * below. It's more expressive than struct pwm_state as it contains a + * duty_offset_ns and so can represent offsets other than zero (with .polarity = + * PWM_POLARITY_NORMAL) and period - duty_cycle (.polarity = + * PWM_POLARITY_INVERSED). + * + * Note there is no explicit bool for enabled. 
A "disabled" PWM is represented + * by .period_length_ns = 0. Note further that the behaviour of a "disabled" PWM + * is undefined. Depending on the hardware's capabilities it might drive the + * active or inactive level, go high-z or even continue to toggle. + * + * The unit for all three members is nanoseconds. + */ +struct pwm_waveform { + u64 period_length_ns; + u64 duty_length_ns; + u64 duty_offset_ns; +}; + /* * struct pwm_state - state of a PWM channel * @period: PWM period (in nanoseconds) @@ -251,6 +276,11 @@ struct pwm_capture { * @request: optional hook for requesting a PWM * @free: optional hook for freeing a PWM * @capture: capture and report PWM signal + * @sizeof_wfhw: size (in bytes) of driver specific waveform representation + * @round_waveform_tohw: convert a struct pwm_waveform to driver specific representation + * @round_waveform_fromhw: convert a driver specific waveform representation to struct pwm_waveform + * @read_waveform: read driver specific waveform representation from hardware + * @write_waveform: write driver specific waveform representation to hardware * @apply: atomically apply a new PWM config * @get_state: get the current PWM state. */ struct pwm_ops { int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_capture *result, unsigned long timeout); + + size_t sizeof_wfhw; + int (*round_waveform_tohw)(struct pwm_chip *chip, struct pwm_device *pwm, + const struct pwm_waveform *wf, void *wfhw); + int (*round_waveform_fromhw)(struct pwm_chip *chip, struct pwm_device *pwm, + const void *wfhw, struct pwm_waveform *wf); + int (*read_waveform)(struct pwm_chip *chip, struct pwm_device *pwm, + void *wfhw); + int (*write_waveform)(struct pwm_chip *chip, struct pwm_device *pwm, + const void *wfhw); + int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state); int (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, @@ -275,6 +316,9 @@ struct pwm_ops { * @of_xlate: request a PWM device given a device tree PWM specifier * @atomic: can the driver's ->apply() be called in atomic context * @uses_pwmchip_alloc: signals if pwmchip_alloc was used to allocate this chip + * @operational: signals if the chip can be used (or is already deregistered) + * @nonatomic_lock: mutex for nonatomic chips + * @atomic_lock: spinlock for atomic chips * @pwms: array of PWM devices allocated by the framework */ struct pwm_chip { @@ -290,6 +334,16 @@ struct pwm_chip { /* only used internally by the PWM framework */ bool uses_pwmchip_alloc; + bool operational; + union { + /* + * Depending on the chip being atomic or not, either the mutex or + * the spinlock is used.
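With the waveform struct and driver callbacks above in place, a hedged consumer-side sketch (hypothetical function; the consumer entry points are declared a little further down in this diff):

	static int example_pwm_program(struct pwm_device *pwm)
	{
		struct pwm_waveform wf = {
			.period_length_ns = 1000000, /* 1 kHz */
			.duty_length_ns = 250000, /* high for a quarter of each period */
			.duty_offset_ns = 0, /* rising edge at the period start */
		};
		int err;

		/* Round to a setting the hardware can actually produce... */
		err = pwm_round_waveform_might_sleep(pwm, &wf);
		if (err < 0)
			return err;

		/* ...then program it; exact=false tolerates further rounding. */
		return pwm_set_waveform_might_sleep(pwm, &wf, false);
	}

	/* A "disabled" output has no flag of its own; it is simply: */
	static const struct pwm_waveform example_off = { .period_length_ns = 0 };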
It protects .operational and + * synchronizes the callbacks in .ops + */ + struct mutex nonatomic_lock; + spinlock_t atomic_lock; + }; struct pwm_device pwms[] __counted_by(npwm); }; @@ -309,9 +363,14 @@ static inline void pwmchip_set_drvdata(struct pwm_chip *chip, void *data) } #if IS_ENABLED(CONFIG_PWM) -/* PWM user APIs */ + +/* PWM consumer APIs */ +int pwm_round_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf); +int pwm_get_waveform_might_sleep(struct pwm_device *pwm, struct pwm_waveform *wf); +int pwm_set_waveform_might_sleep(struct pwm_device *pwm, const struct pwm_waveform *wf, bool exact); int pwm_apply_might_sleep(struct pwm_device *pwm, const struct pwm_state *state); int pwm_apply_atomic(struct pwm_device *pwm, const struct pwm_state *state); +int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state); int pwm_adjust_config(struct pwm_device *pwm); /** @@ -436,6 +495,11 @@ static inline int pwm_apply_atomic(struct pwm_device *pwm, return -EOPNOTSUPP; } +static inline int pwm_get_state_hw(struct pwm_device *pwm, struct pwm_state *state) +{ + return -EOPNOTSUPP; +} + static inline int pwm_adjust_config(struct pwm_device *pwm) { return -EOPNOTSUPP; diff --git a/include/linux/random.h b/include/linux/random.h index b0a940af4fff..333cecfca93f 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -145,13 +145,6 @@ declare_get_random_var_wait(u64, u32) declare_get_random_var_wait(long, unsigned long) #undef declare_get_random_var -/* - * This is designed to be standalone for just prandom - * users, but for now we include it from <linux/random.h> - * for legacy reasons. - */ -#include <linux/prandom.h> - #ifdef CONFIG_SMP int random_prepare_cpu(unsigned int cpu); int random_online_cpu(unsigned int cpu); diff --git a/include/linux/range.h b/include/linux/range.h index 6ad0b73cb7ad..d7f98e1285d7 100644 --- a/include/linux/range.h +++ b/include/linux/range.h @@ -13,11 +13,20 @@ static inline u64 range_len(const struct range *range) return range->end - range->start + 1; } -static inline bool range_contains(struct range *r1, struct range *r2) +/* True if r1 completely contains r2 */ +static inline bool range_contains(const struct range *r1, + const struct range *r2) { return r1->start <= r2->start && r1->end >= r2->end; } +/* True if any part of r1 overlaps r2 */ +static inline bool range_overlaps(const struct range *r1, + const struct range *r2) +{ + return r1->start <= r2->end && r1->end >= r2->start; +} + int add_range(struct range *range, int az, int nr_range, u64 start, u64 end); @@ -31,4 +40,10 @@ int clean_sort_range(struct range *range, int az); void sort_range(struct range *range, int nr_range); +#define DEFINE_RANGE(_start, _end) \ +(struct range) { \ + .start = (_start), \ + .end = (_end), \ + } + #endif diff --git a/include/linux/rbtree_latch.h b/include/linux/rbtree_latch.h index 6a0999c26c7c..2f630eb8307e 100644 --- a/include/linux/rbtree_latch.h +++ b/include/linux/rbtree_latch.h @@ -14,7 +14,7 @@ * * If we need to allow unconditional lookups (say as required for NMI context * usage) we need a more complex setup; this data structure provides this by - * employing the latch technique -- see @raw_write_seqcount_latch -- to + * employing the latch technique -- see @write_seqcount_latch_begin -- to * implement a latched RB-tree which does allow for unconditional lookups by * virtue of always having (at least) one stable copy of the tree. 
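Returning to the linux/range.h hunk just above: DEFINE_RANGE() expands to a compound literal, and both helpers treat start/end as inclusive. A hedged sketch with hypothetical values:

	#include <linux/range.h>

	static bool example_request_conflicts(u64 start, u64 end)
	{
		struct range reserved = DEFINE_RANGE(0x1000, 0x1fff);
		struct range request = DEFINE_RANGE(start, end);

		/* range_overlaps(): true if any part of request intersects reserved */
		return range_overlaps(&reserved, &request);
	}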
* @@ -132,7 +132,7 @@ __lt_find(void *key, struct latch_tree_root *ltr, int idx, * @ops: operators defining the node order * * It inserts @node into @root in an ordered fashion such that we can always - * observe one complete tree. See the comment for raw_write_seqcount_latch(). + * observe one complete tree. See the comment for write_seqcount_latch_begin(). * * The inserts use rcu_assign_pointer() to publish the element such that the * tree structure is stored before we can observe the new @node. @@ -145,10 +145,11 @@ latch_tree_insert(struct latch_tree_node *node, struct latch_tree_root *root, const struct latch_tree_ops *ops) { - raw_write_seqcount_latch(&root->seq); + write_seqcount_latch_begin(&root->seq); __lt_insert(node, root, 0, ops->less); - raw_write_seqcount_latch(&root->seq); + write_seqcount_latch(&root->seq); __lt_insert(node, root, 1, ops->less); + write_seqcount_latch_end(&root->seq); } /** @@ -159,7 +160,7 @@ latch_tree_insert(struct latch_tree_node *node, * * Removes @node from the trees @root in an ordered fashion such that we can * always observe one complete tree. See the comment for - * raw_write_seqcount_latch(). + * write_seqcount_latch_begin(). * * It is assumed that @node will observe one RCU quiescent state before being * reused or freed. @@ -172,10 +173,11 @@ latch_tree_erase(struct latch_tree_node *node, struct latch_tree_root *root, const struct latch_tree_ops *ops) { - raw_write_seqcount_latch(&root->seq); + write_seqcount_latch_begin(&root->seq); __lt_erase(node, root, 0); - raw_write_seqcount_latch(&root->seq); + write_seqcount_latch(&root->seq); __lt_erase(node, root, 1); + write_seqcount_latch_end(&root->seq); } /** @@ -204,9 +206,9 @@ latch_tree_find(void *key, struct latch_tree_root *root, unsigned int seq; do { - seq = raw_read_seqcount_latch(&root->seq); + seq = read_seqcount_latch(&root->seq); node = __lt_find(key, root, seq & 1, ops->comp); - } while (raw_read_seqcount_latch_retry(&root->seq, seq)); + } while (read_seqcount_latch_retry(&root->seq, seq)); return node; } diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 58d84c59f3dd..48e5c03df1dd 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -401,7 +401,7 @@ static inline int debug_lockdep_rcu_enabled(void) */ #define RCU_LOCKDEP_WARN(c, s) \ do { \ - static bool __section(".data.unlikely") __warned; \ + static bool __section(".data..unlikely") __warned; \ if (debug_lockdep_rcu_enabled() && (c) && \ debug_lockdep_rcu_enabled() && !__warned) { \ __warned = true; \ diff --git a/include/linux/rcupdate_trace.h b/include/linux/rcupdate_trace.h index eda493200663..e6c44eb428ab 100644 --- a/include/linux/rcupdate_trace.h +++ b/include/linux/rcupdate_trace.h @@ -10,6 +10,7 @@ #include <linux/sched.h> #include <linux/rcupdate.h> +#include <linux/cleanup.h> extern struct lockdep_map rcu_trace_lock_map; @@ -98,4 +99,8 @@ static inline void rcu_read_lock_trace(void) { BUG(); } static inline void rcu_read_unlock_trace(void) { BUG(); } #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */ +DEFINE_LOCK_GUARD_0(rcu_tasks_trace, + rcu_read_lock_trace(), + rcu_read_unlock_trace()) + #endif /* __LINUX_RCUPDATE_TRACE_H */ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 0ee270b3f5ed..fe42315f667f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -165,7 +165,6 @@ static inline bool rcu_inkernel_boot_has_ended(void) { return true; } static inline bool rcu_is_watching(void) { return true; } static inline void rcu_momentary_eqs(void) { } static
inline void kfree_rcu_scheduler_running(void) { } -static inline bool rcu_gp_might_be_stalled(void) { return false; } /* Avoid RCU read-side critical sections leaking across. */ static inline void rcu_all_qs(void) { barrier(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 90a684f94776..27d86d912781 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -40,7 +40,6 @@ void kvfree_rcu_barrier(void); void rcu_barrier(void); void rcu_momentary_eqs(void); void kfree_rcu_scheduler_running(void); -bool rcu_gp_might_be_stalled(void); struct rcu_gp_oldstate { unsigned long rgos_norm; diff --git a/include/linux/regmap.h b/include/linux/regmap.h index f9ccad32fc5c..fd41baccbf3e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -54,7 +54,14 @@ struct sdw_slave; #define REGMAP_UPSHIFT(s) (-(s)) #define REGMAP_DOWNSHIFT(s) (s) -/* An enum of all the supported cache types */ +/* + * The supported cache types, the default is no cache. Any new caches + * should usually use the maple tree cache unless they specifically + * require that there are never any allocations at runtime and can't + * provide defaults, in which case they should use the flat cache. The + * rbtree cache *may* have some performance advantage for very low end + * systems that make heavy use of cache syncs but is mainly legacy. + */ enum regcache_type { REGCACHE_NONE, REGCACHE_RBTREE, @@ -106,17 +113,17 @@ struct reg_sequence { * @addr: Address to poll * @val: Unsigned integer variable to read the value into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read the usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read + * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read * error return value in case of an error read. In the two former cases, * the last read value at @addr is stored in @val. Must not be called * from atomic context if sleep_us or timeout_us are used. - * - * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. */ #define regmap_read_poll_timeout(map, addr, val, cond, sleep_us, timeout_us) \ ({ \ @@ -133,20 +140,20 @@ struct reg_sequence { * @addr: Address to poll * @val: Unsigned integer variable to read the value into * @cond: Break condition (usually involving @val) - * @delay_us: Time to udelay between reads in us (0 tight-loops). - * Should be less than ~10us since udelay is used - * (see Documentation/timers/timers-howto.rst). + * @delay_us: Time to udelay between reads in us (0 tight-loops). Please + * read the udelay() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read - * error return value in case of an error read. In the two former cases, - * the last read value at @addr is stored in @val. - * * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h. *
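A hedged usage sketch of regmap_read_poll_timeout() as documented in this hunk; EXAMPLE_STATUS and EXAMPLE_READY are hypothetical register and bit names:

	#define EXAMPLE_STATUS 0x04
	#define EXAMPLE_READY BIT(0)

	static int example_wait_ready(struct regmap *map)
	{
		unsigned int val;

		/*
		 * Poll until the READY bit is set, sleeping up to 100 us between
		 * reads and giving up with -ETIMEDOUT after 10 ms.
		 */
		return regmap_read_poll_timeout(map, EXAMPLE_STATUS, val,
						val & EXAMPLE_READY, 100, 10000);
	}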
* Note: In general regmap cannot be used in atomic context. If you want to use * this macro then first setup your regmap for atomic use (flat or no cache * and MMIO regmap). + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_read + * error return value in case of an error read. In the two former cases, + * the last read value at @addr is stored in @val. */ #define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \ ({ \ @@ -177,17 +184,17 @@ struct reg_sequence { * @field: Regmap field to read from * @val: Unsigned integer variable to read the value into * @cond: Break condition (usually involving @val) - * @sleep_us: Maximum time to sleep between reads in us (0 - * tight-loops). Should be less than ~20ms since usleep_range - * is used (see Documentation/timers/timers-howto.rst). + * @sleep_us: Maximum time to sleep between reads in us (0 tight-loops). Please + * read the usleep_range() function description for details and + * limitations. * @timeout_us: Timeout in us, 0 means never timeout * - * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read + * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. + * + * Returns: 0 on success and -ETIMEDOUT upon a timeout or the regmap_field_read * error return value in case of an error read. In the two former cases, * the last read value at @addr is stored in @val. Must not be called * from atomic context if sleep_us or timeout_us are used. - * - * This is modelled after the readx_poll_timeout macros in linux/iopoll.h. */ #define regmap_field_read_poll_timeout(field, val, cond, sleep_us, timeout_us) \ ({ \ @@ -1328,6 +1335,15 @@ static inline int regmap_clear_bits(struct regmap *map, return regmap_update_bits_base(map, reg, bits, 0, NULL, false, false); } +static inline int regmap_assign_bits(struct regmap *map, unsigned int reg, + unsigned int bits, bool value) +{ + if (value) + return regmap_set_bits(map, reg, bits); + else + return regmap_clear_bits(map, reg, bits); +} + int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits); /** @@ -1796,6 +1812,13 @@ static inline int regmap_clear_bits(struct regmap *map, return -EINVAL; } +static inline int regmap_assign_bits(struct regmap *map, unsigned int reg, + unsigned int bits, bool value) +{ + WARN_ONCE(1, "regmap API is disabled"); + return -EINVAL; +} + static inline int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits) { diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index b9ce521910a0..8c3c372ad735 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -168,6 +168,29 @@ int devm_regulator_get_enable_read_voltage(struct device *dev, const char *id); void regulator_put(struct regulator *regulator); void devm_regulator_put(struct regulator *regulator); +#if IS_ENABLED(CONFIG_OF) +struct regulator *__must_check of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id); +struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id); +#else +static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} +#endif + int regulator_register_supply_alias(struct device *dev, const char
*id, struct device *alias_dev, const char *alias_id); @@ -350,6 +373,20 @@ devm_regulator_get_optional(struct device *dev, const char *id) return ERR_PTR(-ENODEV); } +static inline struct regulator *__must_check of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} + +static inline struct regulator *__must_check devm_of_regulator_get_optional(struct device *dev, + struct device_node *node, + const char *id) +{ + return ERR_PTR(-ENODEV); +} + static inline void regulator_put(struct regulator *regulator) { } diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index f230a472ccd3..5b66caf1695d 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -269,6 +269,11 @@ enum regulator_type { * config but it cannot store it for later usage. * Callback should return 0 on success or negative ERRNO * indicating failure. + * @init_cb: Optional callback called after the parsing of init_data. + * Allows the regulator to perform runtime init if necessary, + * such as syncing the regulator with the parsed constraints. + * Callback should return 0 on success or negative ERRNO + * indicating failure. * @id: Numerical identifier for the regulator. * @ops: Regulator operations table. * @irq: Interrupt number for the regulator. @@ -365,6 +370,8 @@ struct regulator_desc { int (*of_parse_cb)(struct device_node *, const struct regulator_desc *, struct regulator_config *); + int (*init_cb)(struct regulator_dev *, + struct regulator_config *); int id; unsigned int continuous_voltage_range:1; unsigned n_voltages; diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 0cd76d264727..b3db09a7429b 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -273,8 +273,6 @@ struct regulator_consumer_supply { * be usable. * @num_consumer_supplies: Number of consumer device supplies. * @consumer_supplies: Consumer device supply configuration. - * - * @regulator_init: Callback invoked when the regulator has been registered. * @driver_data: Data passed to regulator_init. */ struct regulator_init_data { @@ -285,8 +283,7 @@ struct regulator_init_data { int num_consumer_supplies; struct regulator_consumer_supply *consumer_supplies; - /* optional regulator machine specific init */ - int (*regulator_init)(void *driver_data); + /* optional regulator machine specific data */ void *driver_data; /* core does not touch this */ }; diff --git a/include/linux/reset.h b/include/linux/reset.h index 514ddf003efc..2986ced69a02 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h @@ -25,6 +25,48 @@ struct reset_control_bulk_data { struct reset_control *rstc; }; +#define RESET_CONTROL_FLAGS_BIT_SHARED BIT(0) /* not exclusive */ +#define RESET_CONTROL_FLAGS_BIT_OPTIONAL BIT(1) +#define RESET_CONTROL_FLAGS_BIT_ACQUIRED BIT(2) /* iff exclusive, not released */ +#define RESET_CONTROL_FLAGS_BIT_DEASSERTED BIT(3) + +/** + * enum reset_control_flags - Flags that can be passed to the reset_control_get functions + * to determine the type of reset control. + * These values cannot be OR'd.
+ * + * @RESET_CONTROL_EXCLUSIVE: exclusive, acquired, + * @RESET_CONTROL_EXCLUSIVE_DEASSERTED: exclusive, acquired, deasserted + * @RESET_CONTROL_EXCLUSIVE_RELEASED: exclusive, released, + * @RESET_CONTROL_SHARED: shared + * @RESET_CONTROL_SHARED_DEASSERTED: shared, deasserted + * @RESET_CONTROL_OPTIONAL_EXCLUSIVE: optional, exclusive, acquired + * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED: optional, exclusive, acquired, deasserted + * @RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED: optional, exclusive, released + * @RESET_CONTROL_OPTIONAL_SHARED: optional, shared + * @RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED: optional, shared, deasserted + */ +enum reset_control_flags { + RESET_CONTROL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_ACQUIRED, + RESET_CONTROL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_ACQUIRED | + RESET_CONTROL_FLAGS_BIT_DEASSERTED, + RESET_CONTROL_EXCLUSIVE_RELEASED = 0, + RESET_CONTROL_SHARED = RESET_CONTROL_FLAGS_BIT_SHARED, + RESET_CONTROL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_SHARED | + RESET_CONTROL_FLAGS_BIT_DEASSERTED, + RESET_CONTROL_OPTIONAL_EXCLUSIVE = RESET_CONTROL_FLAGS_BIT_OPTIONAL | + RESET_CONTROL_FLAGS_BIT_ACQUIRED, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL | + RESET_CONTROL_FLAGS_BIT_ACQUIRED | + RESET_CONTROL_FLAGS_BIT_DEASSERTED, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED = RESET_CONTROL_FLAGS_BIT_OPTIONAL, + RESET_CONTROL_OPTIONAL_SHARED = RESET_CONTROL_FLAGS_BIT_OPTIONAL | + RESET_CONTROL_FLAGS_BIT_SHARED, + RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED = RESET_CONTROL_FLAGS_BIT_OPTIONAL | + RESET_CONTROL_FLAGS_BIT_SHARED | + RESET_CONTROL_FLAGS_BIT_DEASSERTED, +}; + #ifdef CONFIG_RESET_CONTROLLER int reset_control_reset(struct reset_control *rstc); @@ -42,30 +84,25 @@ int reset_control_bulk_acquire(int num_rstcs, struct reset_control_bulk_data *rs void reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs); struct reset_control *__of_reset_control_get(struct device_node *node, - const char *id, int index, bool shared, - bool optional, bool acquired); + const char *id, int index, enum reset_control_flags flags); struct reset_control *__reset_control_get(struct device *dev, const char *id, - int index, bool shared, - bool optional, bool acquired); + int index, enum reset_control_flags flags); void reset_control_put(struct reset_control *rstc); int __reset_control_bulk_get(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired); + enum reset_control_flags flags); void reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs); int __device_reset(struct device *dev, bool optional); struct reset_control *__devm_reset_control_get(struct device *dev, - const char *id, int index, bool shared, - bool optional, bool acquired); + const char *id, int index, enum reset_control_flags flags); int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired); + enum reset_control_flags flags); struct reset_control *devm_reset_control_array_get(struct device *dev, - bool shared, bool optional); -struct reset_control *of_reset_control_array_get(struct device_node *np, - bool shared, bool optional, - bool acquired); + enum reset_control_flags flags); +struct reset_control *of_reset_control_array_get(struct device_node *np, enum reset_control_flags); int reset_control_get_count(struct device *dev); @@ -116,17 +153,19 @@ static inline int 
__device_reset(struct device *dev, bool optional) static inline struct reset_control *__of_reset_control_get( struct device_node *node, - const char *id, int index, bool shared, - bool optional, bool acquired) + const char *id, int index, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline struct reset_control *__reset_control_get( struct device *dev, const char *id, - int index, bool shared, bool optional, - bool acquired) + int index, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? NULL : ERR_PTR(-ENOTSUPP); } @@ -162,8 +201,10 @@ reset_control_bulk_release(int num_rstcs, struct reset_control_bulk_data *rstcs) static inline int __reset_control_bulk_get(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired) + enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? 0 : -EOPNOTSUPP; } @@ -174,30 +215,36 @@ reset_control_bulk_put(int num_rstcs, struct reset_control_bulk_data *rstcs) static inline struct reset_control *__devm_reset_control_get( struct device *dev, const char *id, - int index, bool shared, bool optional, - bool acquired) + int index, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline int __devm_reset_control_bulk_get(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs, - bool shared, bool optional, bool acquired) + enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? 0 : -EOPNOTSUPP; } static inline struct reset_control * -devm_reset_control_array_get(struct device *dev, bool shared, bool optional) +devm_reset_control_array_get(struct device *dev, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? NULL : ERR_PTR(-ENOTSUPP); } static inline struct reset_control * -of_reset_control_array_get(struct device_node *np, bool shared, bool optional, - bool acquired) +of_reset_control_array_get(struct device_node *np, enum reset_control_flags flags) { + bool optional = flags & RESET_CONTROL_FLAGS_BIT_OPTIONAL; + return optional ? 
NULL : ERR_PTR(-ENOTSUPP); } @@ -236,7 +283,7 @@ static inline int device_reset_optional(struct device *dev) static inline struct reset_control * __must_check reset_control_get_exclusive(struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, false, false, true); + return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE); } /** @@ -253,7 +300,7 @@ static inline int __must_check reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE); } /** @@ -274,7 +321,7 @@ static inline struct reset_control * __must_check reset_control_get_exclusive_released(struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, false, false, false); + return __reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED); } /** @@ -295,7 +342,7 @@ static inline int __must_check reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_EXCLUSIVE_RELEASED); } /** @@ -316,7 +363,8 @@ static inline int __must_check reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED); } /** @@ -344,7 +392,7 @@ reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_r static inline struct reset_control *reset_control_get_shared( struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, true, false, false); + return __reset_control_get(dev, id, 0, RESET_CONTROL_SHARED); } /** @@ -361,7 +409,7 @@ static inline int __must_check reset_control_bulk_get_shared(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED); } /** @@ -378,7 +426,7 @@ reset_control_bulk_get_shared(struct device *dev, int num_rstcs, static inline struct reset_control *reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, false, true, true); + return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE); } /** @@ -398,7 +446,7 @@ static inline int __must_check reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_EXCLUSIVE); } /** @@ -415,7 +463,7 @@ reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, static inline struct reset_control *reset_control_get_optional_shared( struct device *dev, const char *id) { - return __reset_control_get(dev, id, 0, true, true, false); + return __reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED); } /** @@ -435,7 +483,7 @@ static inline int __must_check reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return 
__reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false); + return __reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED); } /** @@ -451,7 +499,7 @@ reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, static inline struct reset_control *of_reset_control_get_exclusive( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, false, false, true); + return __of_reset_control_get(node, id, 0, RESET_CONTROL_EXCLUSIVE); } /** @@ -471,7 +519,7 @@ static inline struct reset_control *of_reset_control_get_exclusive( static inline struct reset_control *of_reset_control_get_optional_exclusive( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, false, true, true); + return __of_reset_control_get(node, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE); } /** @@ -496,7 +544,7 @@ static inline struct reset_control *of_reset_control_get_optional_exclusive( static inline struct reset_control *of_reset_control_get_shared( struct device_node *node, const char *id) { - return __of_reset_control_get(node, id, 0, true, false, false); + return __of_reset_control_get(node, id, 0, RESET_CONTROL_SHARED); } /** @@ -513,7 +561,7 @@ static inline struct reset_control *of_reset_control_get_shared( static inline struct reset_control *of_reset_control_get_exclusive_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, false, false, true); + return __of_reset_control_get(node, NULL, index, RESET_CONTROL_EXCLUSIVE); } /** @@ -541,7 +589,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index( static inline struct reset_control *of_reset_control_get_shared_by_index( struct device_node *node, int index) { - return __of_reset_control_get(node, NULL, index, true, false, false); + return __of_reset_control_get(node, NULL, index, RESET_CONTROL_SHARED); } /** @@ -560,7 +608,26 @@ static inline struct reset_control * __must_check devm_reset_control_get_exclusive(struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, false, true); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE); +} + +/** + * devm_reset_control_get_exclusive_deasserted - resource managed + * reset_control_get_exclusive() + + * reset_control_deassert() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_exclusive() + reset_control_deassert(). For reset + * controllers returned from this function, reset_control_assert() + + * reset_control_put() is called automatically on driver detach. + * + * See reset_control_get_exclusive() for more information. 
+ */ +static inline struct reset_control * __must_check +devm_reset_control_get_exclusive_deasserted(struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_DEASSERTED); } /** @@ -580,7 +647,8 @@ static inline int __must_check devm_reset_control_bulk_get_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, true); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_EXCLUSIVE); } /** @@ -599,7 +667,7 @@ static inline struct reset_control * __must_check devm_reset_control_get_exclusive_released(struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, false, false); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_EXCLUSIVE_RELEASED); } /** @@ -619,7 +687,8 @@ static inline int __must_check devm_reset_control_bulk_get_exclusive_released(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, false, false); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_EXCLUSIVE_RELEASED); } /** @@ -638,7 +707,7 @@ static inline struct reset_control * __must_check devm_reset_control_get_optional_exclusive_released(struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, true, false); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED); } /** @@ -658,7 +727,8 @@ static inline int __must_check devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, false); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_OPTIONAL_EXCLUSIVE_RELEASED); } /** @@ -673,7 +743,26 @@ devm_reset_control_bulk_get_optional_exclusive_released(struct device *dev, int static inline struct reset_control *devm_reset_control_get_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, true, false, false); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED); +} + +/** + * devm_reset_control_get_shared_deasserted - resource managed + * reset_control_get_shared() + + * reset_control_deassert() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_shared() + reset_control_deassert(). For reset + * controllers returned from this function, reset_control_assert() + + * reset_control_put() is called automatically on driver detach. + * + * See devm_reset_control_get_shared() for more information. 
+ */ +static inline struct reset_control * __must_check +devm_reset_control_get_shared_deasserted(struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_SHARED_DEASSERTED); } /** @@ -693,7 +782,29 @@ static inline int __must_check devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, false, false); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_SHARED); +} + +/** + * devm_reset_control_bulk_get_shared_deasserted - resource managed + * reset_control_bulk_get_shared() + + * reset_control_bulk_deassert() + * @dev: device to be reset by the controller + * @num_rstcs: number of entries in rstcs array + * @rstcs: array of struct reset_control_bulk_data with reset line names set + * + * Managed reset_control_bulk_get_shared() + reset_control_bulk_deassert(). For + * reset controllers returned from this function, reset_control_bulk_assert() + + * reset_control_bulk_put() are called automatically on driver detach. + * + * See devm_reset_control_bulk_get_shared() for more information. + */ +static inline int __must_check +devm_reset_control_bulk_get_shared_deasserted(struct device *dev, int num_rstcs, + struct reset_control_bulk_data *rstcs) +{ + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_SHARED_DEASSERTED); } /** @@ -711,7 +822,26 @@ devm_reset_control_bulk_get_shared(struct device *dev, int num_rstcs, static inline struct reset_control *devm_reset_control_get_optional_exclusive( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, false, true, true); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE); +} + +/** + * devm_reset_control_get_optional_exclusive_deasserted - resource managed + * reset_control_get_optional_exclusive() + + * reset_control_deassert() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_optional_exclusive() + reset_control_deassert(). + * For reset controllers returned from this function, reset_control_assert() + + * reset_control_put() is called automatically on driver detach. + * + * See devm_reset_control_get_optional_exclusive() for more information. 
+ */ +static inline struct reset_control * +devm_reset_control_get_optional_exclusive_deasserted(struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_EXCLUSIVE_DEASSERTED); } /** @@ -731,7 +861,8 @@ static inline int __must_check devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, false, true, true); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, + RESET_CONTROL_OPTIONAL_EXCLUSIVE); } /** @@ -749,7 +880,26 @@ devm_reset_control_bulk_get_optional_exclusive(struct device *dev, int num_rstcs static inline struct reset_control *devm_reset_control_get_optional_shared( struct device *dev, const char *id) { - return __devm_reset_control_get(dev, id, 0, true, true, false); + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED); +} + +/** + * devm_reset_control_get_optional_shared_deasserted - resource managed + * reset_control_get_optional_shared() + + * reset_control_deassert() + * @dev: device to be reset by the controller + * @id: reset line name + * + * Managed reset_control_get_optional_shared() + reset_control_deassert(). For + * reset controllers returned from this function, reset_control_assert() + + * reset_control_put() is called automatically on driver detach. + * + * See devm_reset_control_get_optional_shared() for more information. + */ +static inline struct reset_control * +devm_reset_control_get_optional_shared_deasserted(struct device *dev, const char *id) +{ + return __devm_reset_control_get(dev, id, 0, RESET_CONTROL_OPTIONAL_SHARED_DEASSERTED); } /** @@ -769,7 +919,7 @@ static inline int __must_check devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, struct reset_control_bulk_data *rstcs) { - return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, true, true, false); + return __devm_reset_control_bulk_get(dev, num_rstcs, rstcs, RESET_CONTROL_OPTIONAL_SHARED); } /** @@ -787,7 +937,7 @@ devm_reset_control_bulk_get_optional_shared(struct device *dev, int num_rstcs, static inline struct reset_control * devm_reset_control_get_exclusive_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, false, false, true); + return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_EXCLUSIVE); } /** @@ -803,7 +953,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index) static inline struct reset_control * devm_reset_control_get_shared_by_index(struct device *dev, int index) { - return __devm_reset_control_get(dev, NULL, index, true, false, false); + return __devm_reset_control_get(dev, NULL, index, RESET_CONTROL_SHARED); } /* @@ -851,54 +1001,54 @@ static inline struct reset_control *devm_reset_control_get_by_index( static inline struct reset_control * devm_reset_control_array_get_exclusive(struct device *dev) { - return devm_reset_control_array_get(dev, false, false); + return devm_reset_control_array_get(dev, RESET_CONTROL_EXCLUSIVE); } static inline struct reset_control * devm_reset_control_array_get_shared(struct device *dev) { - return devm_reset_control_array_get(dev, true, false); + return devm_reset_control_array_get(dev, RESET_CONTROL_SHARED); } static inline struct reset_control * devm_reset_control_array_get_optional_exclusive(struct device *dev) { - return devm_reset_control_array_get(dev, false, true); + return devm_reset_control_array_get(dev, 
RESET_CONTROL_OPTIONAL_EXCLUSIVE); } static inline struct reset_control * devm_reset_control_array_get_optional_shared(struct device *dev) { - return devm_reset_control_array_get(dev, true, true); + return devm_reset_control_array_get(dev, RESET_CONTROL_OPTIONAL_SHARED); } static inline struct reset_control * of_reset_control_array_get_exclusive(struct device_node *node) { - return of_reset_control_array_get(node, false, false, true); + return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE); } static inline struct reset_control * of_reset_control_array_get_exclusive_released(struct device_node *node) { - return of_reset_control_array_get(node, false, false, false); + return of_reset_control_array_get(node, RESET_CONTROL_EXCLUSIVE_RELEASED); } static inline struct reset_control * of_reset_control_array_get_shared(struct device_node *node) { - return of_reset_control_array_get(node, true, false, true); + return of_reset_control_array_get(node, RESET_CONTROL_SHARED); } static inline struct reset_control * of_reset_control_array_get_optional_exclusive(struct device_node *node) { - return of_reset_control_array_get(node, false, true, true); + return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_EXCLUSIVE); } static inline struct reset_control * of_reset_control_array_get_optional_shared(struct device_node *node) { - return of_reset_control_array_get(node, true, true, true); + return of_reset_control_array_get(node, RESET_CONTROL_OPTIONAL_SHARED); } #endif diff --git a/include/linux/rmap.h b/include/linux/rmap.h index d5e93e44322e..683a04088f3f 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -171,7 +171,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma, unlink_anon_vmas(next); } -struct anon_vma *folio_get_anon_vma(struct folio *folio); +struct anon_vma *folio_get_anon_vma(const struct folio *folio); /* RMAP flags, currently only relevant for some anon rmap operations. */ typedef int __bitwise rmap_t; @@ -194,8 +194,8 @@ enum rmap_level { RMAP_LEVEL_PMD, }; -static inline void __folio_rmap_sanity_checks(struct folio *folio, - struct page *page, int nr_pages, enum rmap_level level) +static inline void __folio_rmap_sanity_checks(const struct folio *folio, + const struct page *page, int nr_pages, enum rmap_level level) { /* hugetlb folios are handled separately. */ VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); @@ -728,11 +728,8 @@ page_vma_mapped_walk_restart(struct page_vma_mapped_walk *pvmw) } bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw); - -/* - * Used by swapoff to help locate where page is expected in vma. - */ -unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); +unsigned long page_address_in_vma(const struct folio *folio, + const struct page *, const struct vm_area_struct *); /* * Cleans the PTEs of shared mappings. 
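The *_deasserted getters added in the reset.h hunks above fold the common get-then-deassert sequence into one devres-managed call; assert and put then happen automatically on driver detach. A hedged probe() sketch with a hypothetical "core" reset line:

	static int example_probe(struct device *dev)
	{
		struct reset_control *rst;

		rst = devm_reset_control_get_exclusive_deasserted(dev, "core");
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		/* "core" is now deasserted; no explicit assert/put needed on unbind. */
		return 0;
	}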
@@ -774,14 +771,14 @@ struct rmap_walk_control { bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma, unsigned long addr, void *arg); int (*done)(struct folio *folio); - struct anon_vma *(*anon_lock)(struct folio *folio, + struct anon_vma *(*anon_lock)(const struct folio *folio, struct rmap_walk_control *rwc); bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); }; void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc); void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc); -struct anon_vma *folio_lock_anon_vma_read(struct folio *folio, +struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio, struct rmap_walk_control *rwc); #else /* !CONFIG_MMU */ diff --git a/include/linux/rtc/m48t59.h b/include/linux/rtc/m48t59.h index 9465d5405fe2..373ba77071c6 100644 --- a/include/linux/rtc/m48t59.h +++ b/include/linux/rtc/m48t59.h @@ -56,6 +56,9 @@ struct m48t59_plat_data { void __iomem *ioaddr; /* offset to RTC registers, automatically set according to the type */ unsigned int offset; + + /* YY digits (in RTC) are offset, i.e. year is 1900 + yy_offset + YY */ + int yy_offset; }; #endif /* _LINUX_RTC_M48T59_H_ */ diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index a7da7dfc06a2..14b88f551920 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -7,7 +7,6 @@ #include <linux/netdevice.h> #include <linux/wait.h> #include <linux/refcount.h> -#include <linux/cleanup.h> #include <uapi/linux/rtnetlink.h> extern int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, u32 group, int echo); @@ -47,13 +46,15 @@ extern int rtnl_is_locked(void); extern int rtnl_lock_killable(void); extern bool refcount_dec_and_rtnl_lock(refcount_t *r); -DEFINE_LOCK_GUARD_0(rtnl, rtnl_lock(), rtnl_unlock()) - extern wait_queue_head_t netdev_unregistering_wq; extern atomic_t dev_unreg_count; extern struct rw_semaphore pernet_ops_rwsem; extern struct rw_semaphore net_rwsem; +#define ASSERT_RTNL() \ + WARN_ONCE(!rtnl_is_locked(), \ + "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) + #ifdef CONFIG_PROVE_LOCKING extern bool lockdep_rtnl_is_held(void); #else @@ -95,6 +96,61 @@ static inline bool lockdep_rtnl_is_held(void) #define rcu_replace_pointer_rtnl(rp, p) \ rcu_replace_pointer(rp, p, lockdep_rtnl_is_held()) +#ifdef CONFIG_DEBUG_NET_SMALL_RTNL +void __rtnl_net_lock(struct net *net); +void __rtnl_net_unlock(struct net *net); +void rtnl_net_lock(struct net *net); +void rtnl_net_unlock(struct net *net); +int rtnl_net_trylock(struct net *net); +int rtnl_net_lock_cmp_fn(const struct lockdep_map *a, const struct lockdep_map *b); + +bool rtnl_net_is_locked(struct net *net); + +#define ASSERT_RTNL_NET(net) \ + WARN_ONCE(!rtnl_net_is_locked(net), \ + "RTNL_NET: assertion failed at %s (%d)\n", \ + __FILE__, __LINE__) + +bool lockdep_rtnl_net_is_held(struct net *net); + +#define rcu_dereference_rtnl_net(net, p) \ + rcu_dereference_check(p, lockdep_rtnl_net_is_held(net)) +#define rtnl_net_dereference(net, p) \ + rcu_dereference_protected(p, lockdep_rtnl_net_is_held(net)) +#define rcu_replace_pointer_rtnl_net(net, rp, p) \ + rcu_replace_pointer(rp, p, lockdep_rtnl_net_is_held(net)) +#else +static inline void __rtnl_net_lock(struct net *net) {} +static inline void __rtnl_net_unlock(struct net *net) {} + +static inline void rtnl_net_lock(struct net *net) +{ + rtnl_lock(); +} + +static inline void rtnl_net_unlock(struct net *net) +{ + rtnl_unlock(); +} + +static inline int rtnl_net_trylock(struct net *net) +{ + return 
rtnl_trylock(); +} + +static inline void ASSERT_RTNL_NET(struct net *net) +{ + ASSERT_RTNL(); +} + +#define rcu_dereference_rtnl_net(net, p) \ + rcu_dereference_rtnl(p) +#define rtnl_net_dereference(net, p) \ + rtnl_dereference(p) +#define rcu_replace_pointer_rtnl_net(net, rp, p) \ + rcu_replace_pointer_rtnl(rp, p) +#endif + static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) { return rtnl_dereference(dev->ingress_queue); @@ -122,10 +178,6 @@ void rtnetlink_init(void); void __rtnl_unlock(void); void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); -#define ASSERT_RTNL() \ - WARN_ONCE(!rtnl_is_locked(), \ - "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) - extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, diff --git a/include/linux/rtsx_common.h b/include/linux/rtsx_common.h index bf290ad14c57..da9c8c6b5d50 100644 --- a/include/linux/rtsx_common.h +++ b/include/linux/rtsx_common.h @@ -12,7 +12,6 @@ #define DRV_NAME_RTSX_PCI "rtsx_pci" #define DRV_NAME_RTSX_PCI_SDMMC "rtsx_pci_sdmmc" -#define DRV_NAME_RTSX_PCI_MS "rtsx_pci_ms" #define RTSX_REG_PAIR(addr, val) (((u32)(addr) << 16) | (u8)(val)) diff --git a/include/linux/rtsx_usb.h b/include/linux/rtsx_usb.h index 3247ed8e9ff0..f267a06c6b1e 100644 --- a/include/linux/rtsx_usb.h +++ b/include/linux/rtsx_usb.h @@ -12,6 +12,10 @@ #include <linux/usb.h> +#define DRV_NAME_RTSX_USB "rtsx_usb" +#define DRV_NAME_RTSX_USB_SDMMC "rtsx_usb_sdmmc" +#define DRV_NAME_RTSX_USB_MS "rtsx_usb_ms" + /* related module names */ #define RTSX_USB_SD_CARD 0 #define RTSX_USB_MS_CARD 1 diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h index c0ef596f340b..5b87c6f4a243 100644 --- a/include/linux/rwlock.h +++ b/include/linux/rwlock.h @@ -2,7 +2,7 @@ #define __LINUX_RWLOCK_H #ifndef __LINUX_INSIDE_SPINLOCK_H -# error "please don't include this file directly" +# error "Please do not include this file directly." #endif /* diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h index dceb0a59b692..31d3d1116323 100644 --- a/include/linux/rwlock_api_smp.h +++ b/include/linux/rwlock_api_smp.h @@ -2,7 +2,7 @@ #define __LINUX_RWLOCK_API_SMP_H #ifndef __LINUX_SPINLOCK_API_SMP_H -# error "please don't include this file directly" +# error "Please do not include this file directly." 
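An aside on the per-netns RTNL helpers added to rtnetlink.h above: when CONFIG_DEBUG_NET_SMALL_RTNL is disabled they all collapse onto the global rtnl_lock(), so call sites can be converted ahead of the actual lock split. A sketch of the intended pattern, using a hypothetical foo_netns_change() helper:

	#include <linux/rtnetlink.h>
	#include <net/net_namespace.h>

	static void foo_netns_change(struct net *net)
	{
		/*
		 * Today this takes the global rtnl_lock(); with
		 * CONFIG_DEBUG_NET_SMALL_RTNL it takes a per-netns mutex,
		 * and lockdep (rtnl_net_lock_cmp_fn) checks the lock order.
		 */
		rtnl_net_lock(net);
		ASSERT_RTNL_NET(net);

		/* ... modify state published under (per-netns) RTNL ... */

		rtnl_net_unlock(net);
	}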
#endif /* diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h index 8544ff05e594..7d81fc6918ee 100644 --- a/include/linux/rwlock_rt.h +++ b/include/linux/rwlock_rt.h @@ -24,13 +24,13 @@ do { \ __rt_rwlock_init(rwl, #rwl, &__key); \ } while (0) -extern void rt_read_lock(rwlock_t *rwlock); +extern void rt_read_lock(rwlock_t *rwlock) __acquires(rwlock); extern int rt_read_trylock(rwlock_t *rwlock); -extern void rt_read_unlock(rwlock_t *rwlock); -extern void rt_write_lock(rwlock_t *rwlock); -extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass); +extern void rt_read_unlock(rwlock_t *rwlock) __releases(rwlock); +extern void rt_write_lock(rwlock_t *rwlock) __acquires(rwlock); +extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass) __acquires(rwlock); extern int rt_write_trylock(rwlock_t *rwlock); -extern void rt_write_unlock(rwlock_t *rwlock); +extern void rt_write_unlock(rwlock_t *rwlock) __releases(rwlock); static __always_inline void read_lock(rwlock_t *rwlock) { diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index e61d164622db..c5e2239b550e 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -273,7 +273,7 @@ static inline void sg_unmark_end(struct scatterlist *sg) } /* - * One 64-bit architectures there is a 4-byte padding in struct scatterlist + * On 64-bit architectures there is a 4-byte padding in struct scatterlist * (assuming also CONFIG_NEED_SG_DMA_LENGTH is set). Use this padding for DMA * flags bits to indicate when a specific dma address is a bus address or the * buffer may have been bounced via SWIOTLB. diff --git a/include/linux/sched.h b/include/linux/sched.h index bb343136ddd0..d380bffee2ef 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1001,7 +1001,7 @@ struct task_struct { #ifdef CONFIG_ARCH_HAS_CPU_PASID unsigned pasid_activated:1; #endif -#ifdef CONFIG_CPU_SUP_INTEL +#ifdef CONFIG_X86_BUS_LOCK_DETECT unsigned reported_split_lock:1; #endif #ifdef CONFIG_TASK_DELAY_ACCT @@ -1121,9 +1121,12 @@ struct task_struct { /* * executable name, excluding path. * - * - normally initialized setup_new_exec() - * - access it with [gs]et_task_comm() - * - lock it with task_lock() + * - normally initialized begin_new_exec() + * - set it with set_task_comm() + * - strscpy_pad() to ensure it is always NUL-terminated and + * zero-padded + * - task_lock() to ensure the operation is atomic and the name is + * fully updated. */ char comm[TASK_COMM_LEN]; @@ -1441,6 +1444,7 @@ struct task_struct { /* Timestamp for last schedule: */ unsigned long long ftrace_timestamp; + unsigned long long ftrace_sleeptime; /* * Number of functions that haven't been traced @@ -1898,7 +1902,7 @@ extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)]; #ifdef CONFIG_THREAD_INFO_IN_TASK # define task_thread_info(task) (&(task)->thread_info) -#elif !defined(__HAVE_THREAD_FUNCTIONS) +#else # define task_thread_info(task) ((struct thread_info *)(task)->stack) #endif @@ -1938,10 +1942,23 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from) __set_task_comm(tsk, from, false); } -extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); +/* + * - Why not use task_lock()? + * User space can randomly change their names anyway, so locking for readers + * doesn't make sense. For writers, locking is probably necessary, as a race + * condition could lead to long-term mixed results. 
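+ * (A sketch of the lock-free read side under this scheme, assuming a
+ * valid struct task_struct *p:
+ *
+ *	char comm[TASK_COMM_LEN];
+ *	pr_info("comm=%s\n", get_task_comm(comm, p));
+ *
+ * no task_lock() is taken, and the worst case is a torn but still
+ * NUL-terminated and zero-padded name.)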
+ * The strscpy_pad() in __set_task_comm() can ensure that the task comm is + * always NUL-terminated and zero-padded. Therefore the race condition between + * reader and writer is not an issue. + * + * - BUILD_BUG_ON() can help prevent the buf from being truncated. + * Since the callers don't perform any return value checks, this safeguard is + * necessary. + */ #define get_task_comm(buf, tsk) ({ \ - BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ - __get_task_comm(buf, sizeof(buf), tsk); \ + BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN); \ + strscpy_pad(buf, (tsk)->comm); \ + buf; \ }) #ifdef CONFIG_SMP @@ -2002,7 +2019,8 @@ static inline void set_tsk_need_resched(struct task_struct *tsk) static inline void clear_tsk_need_resched(struct task_struct *tsk) { - clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED); + atomic_long_andnot(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY, + (atomic_long_t *)&task_thread_info(tsk)->flags); } static inline int test_tsk_need_resched(struct task_struct *tsk) diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index e62ff805cfc9..6eb65ceed213 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h @@ -8,12 +8,6 @@ #define SUID_DUMP_USER 1 /* Dump as user of process */ #define SUID_DUMP_ROOT 2 /* Dump as root */ -/* mm flags */ - -/* for SUID_DUMP_* above */ -#define MMF_DUMPABLE_BITS 2 -#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1) - extern void set_dumpable(struct mm_struct *mm, int value); /* * This returns the actual value of the suid_dumpable flag. For things @@ -31,80 +25,4 @@ static inline int get_dumpable(struct mm_struct *mm) return __get_dumpable(mm->flags); } -/* coredump filter bits */ -#define MMF_DUMP_ANON_PRIVATE 2 -#define MMF_DUMP_ANON_SHARED 3 -#define MMF_DUMP_MAPPED_PRIVATE 4 -#define MMF_DUMP_MAPPED_SHARED 5 -#define MMF_DUMP_ELF_HEADERS 6 -#define MMF_DUMP_HUGETLB_PRIVATE 7 -#define MMF_DUMP_HUGETLB_SHARED 8 -#define MMF_DUMP_DAX_PRIVATE 9 -#define MMF_DUMP_DAX_SHARED 10 - -#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS -#define MMF_DUMP_FILTER_BITS 9 -#define MMF_DUMP_FILTER_MASK \ - (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT) -#define MMF_DUMP_FILTER_DEFAULT \ - ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\ - (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF) - -#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS -# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS) -#else -# define MMF_DUMP_MASK_DEFAULT_ELF 0 -#endif - /* leave room for more dump flags */ -#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ -#define MMF_VM_HUGEPAGE 17 /* set when mm is available for - khugepaged */ -/* - * This one-shot flag is dropped due to necessity of changing exe once again - * on NFS restore - */ -//#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ - -#define MMF_HAS_UPROBES 19 /* has uprobes */ -#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ -#define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ -#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ -#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ -#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ -#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) -#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */ -#define MMF_MULTIPROCESS 26 /* mm is shared between processes */ -/* - * MMF_HAS_PINNED: Whether this mm has pinned any pages. 
This can be either - * replaced in the future by mm.pinned_vm when it becomes stable, or grow into - * a counter on its own. We're aggresive on this bit for now: even if the - * pinned pages were unpinned later on, we'll still keep this bit set for the - * lifecycle of this mm, just for simplicity. - */ -#define MMF_HAS_PINNED 27 /* FOLL_PIN has run, never cleared */ - -#define MMF_HAS_MDWE 28 -#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE) - - -#define MMF_HAS_MDWE_NO_INHERIT 29 - -#define MMF_VM_MERGE_ANY 30 -#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY) - -#define MMF_TOPDOWN 31 /* mm searches top down by default */ -#define MMF_TOPDOWN_MASK (1 << MMF_TOPDOWN) - -#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ - MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\ - MMF_VM_MERGE_ANY_MASK | MMF_TOPDOWN_MASK) - -static inline unsigned long mmf_init_flags(unsigned long flags) -{ - if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT)) - flags &= ~((1UL << MMF_HAS_MDWE) | - (1UL << MMF_HAS_MDWE_NO_INHERIT)); - return flags & MMF_INIT_MASK; -} - #endif /* _LINUX_SCHED_COREDUMP_H */ diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index 1ddbde64a31b..1d70a9867fb1 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -199,17 +199,18 @@ struct sched_ext_entity { #ifdef CONFIG_EXT_GROUP_SCHED struct cgroup *cgrp_moving_from; #endif - /* must be the last field, see init_scx_entity() */ struct list_head tasks_node; }; void sched_ext_free(struct task_struct *p); void print_scx_info(const char *log_lvl, struct task_struct *p); +void scx_softlockup(u32 dur_s); #else /* !CONFIG_SCHED_CLASS_EXT */ static inline void sched_ext_free(struct task_struct *p) {} static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {} +static inline void scx_softlockup(u32 dur_s) {} #endif /* CONFIG_SCHED_CLASS_EXT */ #endif /* _LINUX_SCHED_EXT_H */ diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index c8ed09ac29ac..d5d03d919df8 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -138,6 +138,7 @@ struct signal_struct { /* POSIX.1b Interval Timers */ unsigned int next_posix_timer_id; struct hlist_head posix_timers; + struct hlist_head ignored_posix_timers; /* ITIMER_REAL timer for the process */ struct hrtimer real_timer; @@ -338,9 +339,6 @@ extern void force_fatal_sig(int); extern void force_exit_sig(int); extern int send_sig(int, struct task_struct *, int); extern int zap_other_threads(struct task_struct *p); -extern struct sigqueue *sigqueue_alloc(void); -extern void sigqueue_free(struct sigqueue *); -extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type); extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); static inline void clear_notify_signal(void) diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index 6c2fef89a4fd..cffad65bdc6a 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -34,7 +34,7 @@ static __always_inline unsigned long *end_of_stack(const struct task_struct *tas #endif } -#elif !defined(__HAVE_THREAD_FUNCTIONS) +#else #define task_stack_page(task) ((void *)(task)->stack) diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 709ad84809e1..341980599c71 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -32,6 +32,11 @@ static inline int secure_computing(void) } #else extern void secure_computing_strict(int this_syscall); +static inline int 
__secure_computing(const struct seccomp_data *sd) +{ + secure_computing_strict(sd->nr); + return 0; +} #endif extern long prctl_get_seccomp(void); diff --git a/include/linux/security.h b/include/linux/security.h index 2ec8f3014757..cbdba435b798 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -34,6 +34,10 @@ #include <linux/sockptr.h> #include <linux/bpf.h> #include <uapi/linux/lsm.h> +#include <linux/lsm/selinux.h> +#include <linux/lsm/smack.h> +#include <linux/lsm/apparmor.h> +#include <linux/lsm/bpf.h> struct linux_binprm; struct cred; @@ -152,6 +156,16 @@ enum lockdown_reason { LOCKDOWN_CONFIDENTIALITY_MAX, }; +/* + * Data exported by the security modules + */ +struct lsm_prop { + struct lsm_prop_selinux selinux; + struct lsm_prop_smack smack; + struct lsm_prop_apparmor apparmor; + struct lsm_prop_bpf bpf; +}; + extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1]; extern u32 lsm_active_cnt; extern const struct lsm_id *lsm_idlist[]; @@ -269,8 +283,32 @@ static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id) return kernel_load_data_str[id]; } +/** + * lsmprop_init - initialize a lsm_prop structure + * @prop: Pointer to the data to initialize + * + * Set all secid for all modules to the specified value. + */ +static inline void lsmprop_init(struct lsm_prop *prop) +{ + memset(prop, 0, sizeof(*prop)); +} + #ifdef CONFIG_SECURITY +/** + * lsmprop_is_set - report if there is a value in the lsm_prop + * @prop: Pointer to the exported LSM data + * + * Returns true if there is a value set, false otherwise + */ +static inline bool lsmprop_is_set(struct lsm_prop *prop) +{ + const struct lsm_prop empty = {}; + + return !!memcmp(prop, &empty, sizeof(*prop)); +} + int call_blocking_lsm_notifier(enum lsm_event event, void *data); int register_blocking_lsm_notifier(struct notifier_block *nb); int unregister_blocking_lsm_notifier(struct notifier_block *nb); @@ -408,7 +446,7 @@ int security_inode_getsecurity(struct mnt_idmap *idmap, void **buffer, bool alloc); int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags); int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer_size); -void security_inode_getsecid(struct inode *inode, u32 *secid); +void security_inode_getlsmprop(struct inode *inode, struct lsm_prop *prop); int security_inode_copy_up(struct dentry *src, struct cred **new); int security_inode_copy_up_xattr(struct dentry *src, const char *name); int security_inode_setintegrity(const struct inode *inode, @@ -444,6 +482,7 @@ void security_cred_free(struct cred *cred); int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); void security_transfer_creds(struct cred *new, const struct cred *old); void security_cred_getsecid(const struct cred *c, u32 *secid); +void security_cred_getlsmprop(const struct cred *c, struct lsm_prop *prop); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); int security_kernel_module_request(char *kmod_name); @@ -463,8 +502,8 @@ int security_task_fix_setgroups(struct cred *new, const struct cred *old); int security_task_setpgid(struct task_struct *p, pid_t pgid); int security_task_getpgid(struct task_struct *p); int security_task_getsid(struct task_struct *p); -void security_current_getsecid_subj(u32 *secid); -void security_task_getsecid_obj(struct task_struct *p, u32 *secid); +void security_current_getlsmprop_subj(struct 
lsm_prop *prop); +void security_task_getlsmprop_obj(struct task_struct *p, struct lsm_prop *prop); int security_task_setnice(struct task_struct *p, int nice); int security_task_setioprio(struct task_struct *p, int ioprio); int security_task_getioprio(struct task_struct *p); @@ -482,7 +521,7 @@ int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, void security_task_to_inode(struct task_struct *p, struct inode *inode); int security_create_user_ns(const struct cred *cred); int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag); -void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid); +void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, struct lsm_prop *prop); int security_msg_msg_alloc(struct msg_msg *msg); void security_msg_msg_free(struct msg_msg *msg); int security_msg_queue_alloc(struct kern_ipc_perm *msq); @@ -515,6 +554,7 @@ int security_setprocattr(int lsmid, const char *name, void *value, size_t size); int security_netlink_send(struct sock *sk, struct sk_buff *skb); int security_ismaclabel(const char *name); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); +int security_lsmprop_to_secctx(struct lsm_prop *prop, char **secdata, u32 *seclen); int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid); void security_release_secctx(char *secdata, u32 seclen); void security_inode_invalidate_secctx(struct inode *inode); @@ -531,6 +571,17 @@ int security_bdev_setintegrity(struct block_device *bdev, size_t size); #else /* CONFIG_SECURITY */ +/** + * lsmprop_is_set - report if there is a value in the lsm_prop + * @prop: Pointer to the exported LSM data + * + * Returns true if there is a value set, false otherwise + */ +static inline bool lsmprop_is_set(struct lsm_prop *prop) +{ + return false; +} + static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) { return 0; @@ -1020,9 +1071,10 @@ static inline int security_inode_listsecurity(struct inode *inode, char *buffer, return 0; } -static inline void security_inode_getsecid(struct inode *inode, u32 *secid) +static inline void security_inode_getlsmprop(struct inode *inode, + struct lsm_prop *prop) { - *secid = 0; + lsmprop_init(prop); } static inline int security_inode_copy_up(struct dentry *src, struct cred **new) @@ -1172,6 +1224,10 @@ static inline void security_cred_getsecid(const struct cred *c, u32 *secid) *secid = 0; } +static inline void security_cred_getlsmprop(const struct cred *c, + struct lsm_prop *prop) +{ } + static inline int security_kernel_act_as(struct cred *cred, u32 secid) { return 0; @@ -1249,14 +1305,15 @@ static inline int security_task_getsid(struct task_struct *p) return 0; } -static inline void security_current_getsecid_subj(u32 *secid) +static inline void security_current_getlsmprop_subj(struct lsm_prop *prop) { - *secid = 0; + lsmprop_init(prop); } -static inline void security_task_getsecid_obj(struct task_struct *p, u32 *secid) +static inline void security_task_getlsmprop_obj(struct task_struct *p, + struct lsm_prop *prop) { - *secid = 0; + lsmprop_init(prop); } static inline int security_task_setnice(struct task_struct *p, int nice) @@ -1332,9 +1389,10 @@ static inline int security_ipc_permission(struct kern_ipc_perm *ipcp, return 0; } -static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid) +static inline void security_ipc_getlsmprop(struct kern_ipc_perm *ipcp, + struct lsm_prop *prop) { - *secid = 0; + lsmprop_init(prop); } static inline int security_msg_msg_alloc(struct msg_msg 
*msg) @@ -1468,7 +1526,14 @@ static inline int security_ismaclabel(const char *name) return 0; } -static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) +static inline int security_secid_to_secctx(u32 secid, char **secdata, + u32 *seclen) +{ + return -EOPNOTSUPP; +} + +static inline int security_lsmprop_to_secctx(struct lsm_prop *prop, + char **secdata, u32 *seclen) { return -EOPNOTSUPP; } @@ -2095,7 +2160,8 @@ static inline void security_key_post_create_or_update(struct key *keyring, int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule, gfp_t gfp); int security_audit_rule_known(struct audit_krule *krule); -int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule); +int security_audit_rule_match(struct lsm_prop *prop, u32 field, u32 op, + void *lsmrule); void security_audit_rule_free(void *lsmrule); #else @@ -2111,8 +2177,8 @@ static inline int security_audit_rule_known(struct audit_krule *krule) return 0; } -static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, - void *lsmrule) +static inline int security_audit_rule_match(struct lsm_prop *prop, u32 field, + u32 op, void *lsmrule) { return 0; } diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h index 2ac50822554e..80f33a93f944 100644 --- a/include/linux/sed-opal.h +++ b/include/linux/sed-opal.h @@ -52,6 +52,7 @@ static inline bool is_sed_ioctl(unsigned int cmd) case IOC_OPAL_GET_GEOMETRY: case IOC_OPAL_DISCOVERY: case IOC_OPAL_REVERT_LSP: + case IOC_OPAL_SET_SID_PW: return true; } return false; diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index fffeb754880f..5298765d6ca4 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -622,6 +622,23 @@ static __always_inline unsigned raw_read_seqcount_latch(const seqcount_latch_t * } /** + * read_seqcount_latch() - pick even/odd latch data copy + * @s: Pointer to seqcount_latch_t + * + * See write_seqcount_latch() for details and a full reader/writer usage + * example. + * + * Return: sequence counter raw value. Use the lowest bit as an index for + * picking which data copy to read. The full counter must then be checked + * with read_seqcount_latch_retry(). 
+ */ +static __always_inline unsigned read_seqcount_latch(const seqcount_latch_t *s) +{ + kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); + return raw_read_seqcount_latch(s); +} + +/** * raw_read_seqcount_latch_retry() - end a seqcount_latch_t read section * @s: Pointer to seqcount_latch_t * @start: count, from raw_read_seqcount_latch() @@ -636,8 +653,33 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) } /** + * read_seqcount_latch_retry() - end a seqcount_latch_t read section + * @s: Pointer to seqcount_latch_t + * @start: count, from read_seqcount_latch() + * + * Return: true if a read section retry is required, else false + */ +static __always_inline int +read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) +{ + kcsan_atomic_next(0); + return raw_read_seqcount_latch_retry(s, start); +} + +/** * raw_write_seqcount_latch() - redirect latch readers to even/odd copy * @s: Pointer to seqcount_latch_t + */ +static __always_inline void raw_write_seqcount_latch(seqcount_latch_t *s) +{ + smp_wmb(); /* prior stores before incrementing "sequence" */ + s->seqcount.sequence++; + smp_wmb(); /* increment "sequence" before following stores */ +} + +/** + * write_seqcount_latch_begin() - redirect latch readers to odd copy + * @s: Pointer to seqcount_latch_t * * The latch technique is a multiversion concurrency control method that allows * queries during non-atomic modifications. If you can guarantee queries never @@ -665,17 +707,11 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * * void latch_modify(struct latch_struct *latch, ...) * { - * smp_wmb(); // Ensure that the last data[1] update is visible - * latch->seq.sequence++; - * smp_wmb(); // Ensure that the seqcount update is visible - * + * write_seqcount_latch_begin(&latch->seq); * modify(latch->data[0], ...); - * - * smp_wmb(); // Ensure that the data[0] update is visible - * latch->seq.sequence++; - * smp_wmb(); // Ensure that the seqcount update is visible - * + * write_seqcount_latch(&latch->seq); * modify(latch->data[1], ...); + * write_seqcount_latch_end(&latch->seq); * } * * The query will have a form like:: @@ -686,13 +722,13 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * unsigned seq, idx; * * do { - * seq = raw_read_seqcount_latch(&latch->seq); + * seq = read_seqcount_latch(&latch->seq); * * idx = seq & 0x01; * entry = data_query(latch->data[idx], ...); * * // This includes needed smp_rmb() - * } while (raw_read_seqcount_latch_retry(&latch->seq, seq)); + * } while (read_seqcount_latch_retry(&latch->seq, seq)); * * return entry; * } @@ -716,11 +752,31 @@ raw_read_seqcount_latch_retry(const seqcount_latch_t *s, unsigned start) * When data is a dynamic data structure; one should use regular RCU * patterns to manage the lifetimes of the objects within. 
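 *
 * NOTE: write_seqcount_latch_begin() and write_seqcount_latch_end()
 * additionally mark the write section as atomic for KCSAN (see the
 * kcsan_nestable_atomic_begin()/_end() calls below); the memory ordering
 * provided by raw_write_seqcount_latch() itself is unchanged.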
*/ -static inline void raw_write_seqcount_latch(seqcount_latch_t *s) +static __always_inline void write_seqcount_latch_begin(seqcount_latch_t *s) { - smp_wmb(); /* prior stores before incrementing "sequence" */ - s->seqcount.sequence++; - smp_wmb(); /* increment "sequence" before following stores */ + kcsan_nestable_atomic_begin(); + raw_write_seqcount_latch(s); +} + +/** + * write_seqcount_latch() - redirect latch readers to even copy + * @s: Pointer to seqcount_latch_t + */ +static __always_inline void write_seqcount_latch(seqcount_latch_t *s) +{ + raw_write_seqcount_latch(s); +} + +/** + * write_seqcount_latch_end() - end a seqcount_latch_t write section + * @s: Pointer to seqcount_latch_t + * + * Marks the end of a seqcount_latch_t writer section, after all copies of the + * latch-protected data have been updated. + */ +static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s) +{ + kcsan_nestable_atomic_end(); } #define __SEQLOCK_UNLOCKED(lockname) \ @@ -754,11 +810,7 @@ static inline void raw_write_seqcount_latch(seqcount_latch_t *s) */ static inline unsigned read_seqbegin(const seqlock_t *sl) { - unsigned ret = read_seqcount_begin(&sl->seqcount); - - kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ - kcsan_flat_atomic_begin(); - return ret; + return read_seqcount_begin(&sl->seqcount); } /** @@ -774,12 +826,6 @@ static inline unsigned read_seqbegin(const seqlock_t *sl) */ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { - /* - * Assume not nested: read_seqretry() may be called multiple times when - * completing read critical section. - */ - kcsan_flat_atomic_end(); - return read_seqcount_retry(&sl->seqcount, start); } diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 4ab65874a850..743b4afaad4c 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -505,7 +505,11 @@ struct uart_port { * The remaining bits are serial-core specific and not modifiable by * userspace. 
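 *
 * Note that when CONFIG_HAS_IOPORT is not set, UPF_FOURPORT below is
 * defined to 0, so tests such as (port->flags & UPF_FOURPORT) compile
 * to constant false on those configurations.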
*/ +#ifdef CONFIG_HAS_IOPORT #define UPF_FOURPORT ((__force upf_t) ASYNC_FOURPORT /* 1 */ ) +#else +#define UPF_FOURPORT 0 +#endif #define UPF_SAK ((__force upf_t) ASYNC_SAK /* 2 */ ) #define UPF_SPD_HI ((__force upf_t) ASYNC_SPD_HI /* 4 */ ) #define UPF_SPD_VHI ((__force upf_t) ASYNC_SPD_VHI /* 5 */ ) diff --git a/include/linux/serio.h b/include/linux/serio.h index bf2191f25350..69a47674af65 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h @@ -6,6 +6,7 @@ #define _SERIO_H +#include <linux/cleanup.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> @@ -161,4 +162,6 @@ static inline void serio_continue_rx(struct serio *serio) spin_unlock_irq(&serio->lock); } +DEFINE_GUARD(serio_pause_rx, struct serio *, serio_pause_rx(_T), serio_continue_rx(_T)) + #endif diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index e7aec20fb44f..3030d9245f5a 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -34,6 +34,12 @@ static inline int set_direct_map_default_noflush(struct page *page) return 0; } +static inline int set_direct_map_valid_noflush(struct page *page, + unsigned nr, bool valid) +{ + return 0; +} + static inline bool kernel_page_present(struct page *page) { return true; diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 515a9a6a3c6f..0b273a7b9f01 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -42,10 +42,10 @@ struct shmem_inode_info { struct inode vfs_inode; }; -#define SHMEM_FL_USER_VISIBLE FS_FL_USER_VISIBLE +#define SHMEM_FL_USER_VISIBLE (FS_FL_USER_VISIBLE | FS_CASEFOLD_FL) #define SHMEM_FL_USER_MODIFIABLE \ - (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL) -#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL) + (FS_IMMUTABLE_FL | FS_APPEND_FL | FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL) +#define SHMEM_FL_INHERITED (FS_NODUMP_FL | FS_NOATIME_FL | FS_CASEFOLD_FL) struct shmem_quota_limits { qsize_t usrquota_bhardlimit; /* Default user quota block hard limit */ @@ -114,6 +114,7 @@ int shmem_unuse(unsigned int type); unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, loff_t write_end, bool shmem_huge_force); +bool shmem_hpage_pmd_enabled(void); #else static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, @@ -121,6 +122,11 @@ static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, { return 0; } + +static inline bool shmem_hpage_pmd_enabled(void) +{ + return false; +} #endif #ifdef CONFIG_SHMEM diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 39f1d16f3628..58009fa66102 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -31,6 +31,7 @@ #include <linux/in6.h> #include <linux/if_packet.h> #include <linux/llist.h> +#include <linux/page_frag_cache.h> #include <net/flow.h> #if IS_ENABLED(CONFIG_NF_CONNTRACK) #include <linux/netfilter/nf_conntrack_common.h> @@ -2681,6 +2682,12 @@ static inline void skb_assert_len(struct sk_buff *skb) #endif /* CONFIG_DEBUG_NET */ } +#if defined(CONFIG_FAIL_SKB_REALLOC) +void skb_might_realloc(struct sk_buff *skb); +#else +static inline void skb_might_realloc(struct sk_buff *skb) {} +#endif + /* * Add data to an sk_buff */ @@ -2781,6 +2788,7 @@ static inline enum skb_drop_reason pskb_may_pull_reason(struct sk_buff *skb, unsigned int len) { DEBUG_NET_WARN_ON_ONCE(len > INT_MAX); + skb_might_realloc(skb); if (likely(len <= skb_headlen(skb))) return 
SKB_NOT_DROPPED_YET; @@ -2909,9 +2917,19 @@ static inline void skb_reset_inner_headers(struct sk_buff *skb) skb->inner_transport_header = skb->transport_header; } +static inline int skb_mac_header_was_set(const struct sk_buff *skb) +{ + return skb->mac_header != (typeof(skb->mac_header))~0U; +} + static inline void skb_reset_mac_len(struct sk_buff *skb) { - skb->mac_len = skb->network_header - skb->mac_header; + if (!skb_mac_header_was_set(skb)) { + DEBUG_NET_WARN_ON_ONCE(1); + skb->mac_len = 0; + } else { + skb->mac_len = skb->network_header - skb->mac_header; + } } static inline unsigned char *skb_inner_transport_header(const struct sk_buff @@ -2927,7 +2945,10 @@ static inline int skb_inner_transport_offset(const struct sk_buff *skb) static inline void skb_reset_inner_transport_header(struct sk_buff *skb) { - skb->inner_transport_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_transport_header))offset); + skb->inner_transport_header = offset; } static inline void skb_set_inner_transport_header(struct sk_buff *skb, @@ -2944,7 +2965,10 @@ static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb) static inline void skb_reset_inner_network_header(struct sk_buff *skb) { - skb->inner_network_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_network_header))offset); + skb->inner_network_header = offset; } static inline void skb_set_inner_network_header(struct sk_buff *skb, @@ -2966,7 +2990,10 @@ static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb) static inline void skb_reset_inner_mac_header(struct sk_buff *skb) { - skb->inner_mac_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->inner_mac_header))offset); + skb->inner_mac_header = offset; } static inline void skb_set_inner_mac_header(struct sk_buff *skb, @@ -2988,7 +3015,10 @@ static inline unsigned char *skb_transport_header(const struct sk_buff *skb) static inline void skb_reset_transport_header(struct sk_buff *skb) { - skb->transport_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->transport_header))offset); + skb->transport_header = offset; } static inline void skb_set_transport_header(struct sk_buff *skb, @@ -3005,7 +3035,10 @@ static inline unsigned char *skb_network_header(const struct sk_buff *skb) static inline void skb_reset_network_header(struct sk_buff *skb) { - skb->network_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != (typeof(skb->network_header))offset); + skb->network_header = offset; } static inline void skb_set_network_header(struct sk_buff *skb, const int offset) @@ -3014,11 +3047,6 @@ static inline void skb_set_network_header(struct sk_buff *skb, const int offset) skb->network_header += offset; } -static inline int skb_mac_header_was_set(const struct sk_buff *skb) -{ - return skb->mac_header != (typeof(skb->mac_header))~0U; -} - static inline unsigned char *skb_mac_header(const struct sk_buff *skb) { DEBUG_NET_WARN_ON_ONCE(!skb_mac_header_was_set(skb)); @@ -3043,7 +3071,10 @@ static inline void skb_unset_mac_header(struct sk_buff *skb) static inline void skb_reset_mac_header(struct sk_buff *skb) { - skb->mac_header = skb->data - skb->head; + long offset = skb->data - skb->head; + + DEBUG_NET_WARN_ON_ONCE(offset != 
(typeof(skb->mac_header))offset); + skb->mac_header = offset; } static inline void skb_set_mac_header(struct sk_buff *skb, const int offset) @@ -3130,9 +3161,15 @@ static inline int skb_inner_network_offset(const struct sk_buff *skb) return skb_inner_network_header(skb) - skb->data; } +static inline enum skb_drop_reason +pskb_network_may_pull_reason(struct sk_buff *skb, unsigned int len) +{ + return pskb_may_pull_reason(skb, skb_network_offset(skb) + len); +} + static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len) { - return pskb_may_pull(skb, skb_network_offset(skb) + len); + return pskb_network_may_pull_reason(skb, len) == SKB_NOT_DROPPED_YET; } /* @@ -3210,6 +3247,7 @@ static inline int __pskb_trim(struct sk_buff *skb, unsigned int len) static inline int pskb_trim(struct sk_buff *skb, unsigned int len) { + skb_might_realloc(skb); return (len < skb->len) ? __pskb_trim(skb, len) : 0; } @@ -3964,6 +4002,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) { + skb_might_realloc(skb); if (likely(len >= skb->len)) return 0; return pskb_trim_rcsum_slow(skb, len); diff --git a/include/linux/slab.h b/include/linux/slab.h index b35e2db7eb0e..10a971c2bde3 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -77,7 +77,17 @@ enum _slab_flag_bits { #define SLAB_POISON __SLAB_FLAG_BIT(_SLAB_POISON) /* Indicate a kmalloc slab */ #define SLAB_KMALLOC __SLAB_FLAG_BIT(_SLAB_KMALLOC) -/* Align objs on cache lines */ +/** + * define SLAB_HWCACHE_ALIGN - Align objects on cache line boundaries. + * + * Sufficiently large objects are aligned on cache line boundary. For object + * size smaller than a half of cache line size, the alignment is on the half of + * cache line size. In general, if object size is smaller than 1/2^n of cache + * line size, the alignment is adjusted to 1/2^n. + * + * If explicit alignment is also requested by the respective + * &struct kmem_cache_args field, the greater of both is alignments is applied. + */ #define SLAB_HWCACHE_ALIGN __SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN) /* Use GFP_DMA memory */ #define SLAB_CACHE_DMA __SLAB_FLAG_BIT(_SLAB_CACHE_DMA) @@ -87,8 +97,8 @@ enum _slab_flag_bits { #define SLAB_STORE_USER __SLAB_FLAG_BIT(_SLAB_STORE_USER) /* Panic if kmem_cache_create() fails */ #define SLAB_PANIC __SLAB_FLAG_BIT(_SLAB_PANIC) -/* - * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! +/** + * define SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! * * This delays freeing the SLAB page by a grace period, it does _NOT_ * delay object freeing. This means that if you do kmem_cache_free() @@ -99,20 +109,22 @@ enum _slab_flag_bits { * stays valid, the trick to using this is relying on an independent * object validation pass. 
Something like: * - * begin: - * rcu_read_lock(); - * obj = lockless_lookup(key); - * if (obj) { - * if (!try_get_ref(obj)) // might fail for free objects - * rcu_read_unlock(); - * goto begin; + * :: + * + * begin: + * rcu_read_lock(); + * obj = lockless_lookup(key); + * if (obj) { + * if (!try_get_ref(obj)) // might fail for free objects + * rcu_read_unlock(); + * goto begin; * - * if (obj->key != key) { // not the object we expected - * put_ref(obj); - * rcu_read_unlock(); - * goto begin; - * } - * } + * if (obj->key != key) { // not the object we expected + * put_ref(obj); + * rcu_read_unlock(); + * goto begin; + * } + * } * rcu_read_unlock(); * * This is useful if we need to approach a kernel structure obliquely, @@ -137,7 +149,6 @@ enum _slab_flag_bits { * * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU. */ -/* Defer freeing slabs to RCU */ #define SLAB_TYPESAFE_BY_RCU __SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU) /* Trace allocations and frees */ #define SLAB_TRACE __SLAB_FLAG_BIT(_SLAB_TRACE) @@ -170,7 +181,12 @@ enum _slab_flag_bits { #else # define SLAB_FAILSLAB __SLAB_FLAG_UNUSED #endif -/* Account to memcg */ +/** + * define SLAB_ACCOUNT - Account allocations to memcg. + * + * All object allocations from this cache will be memcg accounted, regardless of + * __GFP_ACCOUNT being or not being passed to individual allocations. + */ #ifdef CONFIG_MEMCG # define SLAB_ACCOUNT __SLAB_FLAG_BIT(_SLAB_ACCOUNT) #else @@ -197,7 +213,13 @@ enum _slab_flag_bits { #endif /* The following flags affect the page allocator grouping pages by mobility */ -/* Objects are reclaimable */ +/** + * define SLAB_RECLAIM_ACCOUNT - Objects are reclaimable. + * + * Use this flag for caches that have an associated shrinker. As a result, slab + * pages are allocated with __GFP_RECLAIMABLE, which affects grouping pages by + * mobility, and are accounted in SReclaimable counter in /proc/meminfo + */ #ifndef CONFIG_SLUB_TINY #define SLAB_RECLAIM_ACCOUNT __SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT) #else @@ -448,6 +470,7 @@ void kfree_sensitive(const void *objp); size_t __ksize(const void *objp); DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T)) +DEFINE_FREE(kfree_sensitive, void *, if (_T) kfree_sensitive(_T)) /** * ksize - Report actual allocation size of associated object diff --git a/include/linux/soc/mediatek/dvfsrc.h b/include/linux/soc/mediatek/dvfsrc.h new file mode 100644 index 000000000000..1498b3ed396b --- /dev/null +++ b/include/linux/soc/mediatek/dvfsrc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright (c) 2021 MediaTek Inc. + * Copyright (c) 2024 Collabora Ltd. 
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com> + */ + +#ifndef __MEDIATEK_DVFSRC_H +#define __MEDIATEK_DVFSRC_H + +enum mtk_dvfsrc_cmd { + MTK_DVFSRC_CMD_BW, + MTK_DVFSRC_CMD_HRT_BW, + MTK_DVFSRC_CMD_PEAK_BW, + MTK_DVFSRC_CMD_OPP, + MTK_DVFSRC_CMD_VCORE_LEVEL, + MTK_DVFSRC_CMD_VSCP_LEVEL, + MTK_DVFSRC_CMD_MAX, +}; + +#if IS_ENABLED(CONFIG_MTK_DVFSRC) + +int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data); +int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data); + +#else + +static inline int mtk_dvfsrc_send_request(const struct device *dev, u32 cmd, u64 data) +{ return -ENODEV; } + +static inline int mtk_dvfsrc_query_info(const struct device *dev, u32 cmd, int *data) +{ return -ENODEV; } + +#endif /* CONFIG_MTK_DVFSRC */ + +#endif diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index 6c6cccc848f4..9956e18c5ffa 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h @@ -434,6 +434,11 @@ #define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ BIT(7) | BIT(8)) +#define MT6735_TOP_AXI_PROT_EN_CONN (BIT(2) | BIT(8)) +#define MT6735_TOP_AXI_PROT_EN_MD1 (BIT(24) | BIT(25) | \ + BIT(26) | BIT(27) | \ + BIT(28)) + #define INFRA_TOPAXI_PROTECTEN 0x0220 #define INFRA_TOPAXI_PROTECTSTA1 0x0228 #define INFRA_TOPAXI_PROTECTEN_SET 0x0260 diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h index 0761128b4354..abe24a73ee19 100644 --- a/include/linux/soc/mediatek/mtk_sip_svc.h +++ b/include/linux/soc/mediatek/mtk_sip_svc.h @@ -22,6 +22,9 @@ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \ ARM_SMCCC_OWNER_SIP, fn_id) +/* DVFSRC SMC calls */ +#define MTK_SIP_DVFSRC_VCOREFS_CONTROL MTK_SIP_SMC_CMD(0x506) + /* IOMMU related SMC call */ #define MTK_SIP_KERNEL_IOMMU_CONTROL MTK_SIP_SMC_CMD(0x514) diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 2f20281d4ad4..8e5d78fb4847 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h @@ -54,7 +54,19 @@ #define LLCC_CAMEXP4 52 #define LLCC_DISP_WB 53 #define LLCC_DISP_1 54 +#define LLCC_VIEYE 57 +#define LLCC_VIDPTH 58 +#define LLCC_GPUMV 59 +#define LLCC_EVA_LEFT 60 +#define LLCC_EVA_RIGHT 61 +#define LLCC_EVAGAIN 62 +#define LLCC_VIPTH 63 #define LLCC_VIDVSP 64 +#define LLCC_DISP_LEFT 65 +#define LLCC_DISP_RIGHT 66 +#define LLCC_EVCS_LEFT 67 +#define LLCC_EVCS_RIGHT 68 +#define LLCC_SPAD 69 /** * struct llcc_slice_desc - Cache slice descriptor diff --git a/include/linux/soc/ti/ti_sci_protocol.h b/include/linux/soc/ti/ti_sci_protocol.h index bd0d11af76c5..fd104b666836 100644 --- a/include/linux/soc/ti/ti_sci_protocol.h +++ b/include/linux/soc/ti/ti_sci_protocol.h @@ -195,6 +195,35 @@ struct ti_sci_clk_ops { u64 *current_freq); }; +/* TISCI LPM IO isolation control values */ +#define TISCI_MSG_VALUE_IO_ENABLE 1 +#define TISCI_MSG_VALUE_IO_DISABLE 0 + +/* TISCI LPM constraint state values */ +#define TISCI_MSG_CONSTRAINT_SET 1 +#define TISCI_MSG_CONSTRAINT_CLR 0 + +/** + * struct ti_sci_pm_ops - Low Power Mode (LPM) control operations + * @lpm_wake_reason: Get the wake up source that woke the SoC from LPM + * - source: The wake up source that woke soc from LPM. + * - timestamp: Timestamp at which soc woke. + * @set_device_constraint: Set LPM constraint on behalf of a device + * - id: Device Identifier + * - state: The desired state of device constraint: set or clear. 
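+ * (one of TISCI_MSG_CONSTRAINT_SET / TISCI_MSG_CONSTRAINT_CLR above)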
+ * @set_latency_constraint: Set LPM resume latency constraint + * - latency: maximum acceptable latency to wake up from low power mode + * - state: The desired state of latency constraint: set or clear. + */ +struct ti_sci_pm_ops { + int (*lpm_wake_reason)(const struct ti_sci_handle *handle, + u32 *source, u64 *timestamp, u8 *pin, u8 *mode); + int (*set_device_constraint)(const struct ti_sci_handle *handle, + u32 id, u8 state); + int (*set_latency_constraint)(const struct ti_sci_handle *handle, + u16 latency, u8 state); +}; + /** * struct ti_sci_resource_desc - Description of TI SCI resource instance range. * @start: Start index of the first resource range. @@ -539,6 +568,7 @@ struct ti_sci_ops { struct ti_sci_core_ops core_ops; struct ti_sci_dev_ops dev_ops; struct ti_sci_clk_ops clk_ops; + struct ti_sci_pm_ops pm_ops; struct ti_sci_rm_core_ops rm_core_ops; struct ti_sci_rm_irq_ops rm_irq_ops; struct ti_sci_rm_ringacc_ops rm_ring_ops; diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h index 195debe2b1db..3e6c8e9d67ae 100644 --- a/include/linux/sockptr.h +++ b/include/linux/sockptr.h @@ -53,6 +53,8 @@ static inline int copy_from_sockptr_offset(void *dst, sockptr_t src, /* Deprecated. * This is unsafe, unless caller checked user provided optlen. * Prefer copy_safe_from_sockptr() instead. + * + * Returns 0 for success, or number of bytes not copied on error. */ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size) { diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 5e0dd47a0412..bd9836690da6 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -4,12 +4,20 @@ #ifndef __SOUNDWIRE_H #define __SOUNDWIRE_H +#include <linux/bitfield.h> #include <linux/bug.h> -#include <linux/lockdep_types.h> +#include <linux/completion.h> +#include <linux/device.h> #include <linux/irq.h> #include <linux/irqdomain.h> +#include <linux/lockdep_types.h> #include <linux/mod_devicetable.h> -#include <linux/bitfield.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <sound/sdca.h> + +struct dentry; +struct fwnode_handle; struct sdw_bus; struct sdw_slave; @@ -226,64 +234,36 @@ enum sdw_clk_stop_mode { /** * struct sdw_dp0_prop - DP0 properties + * @words: wordlengths supported * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64 * (inclusive) * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64 * (inclusive) * @num_words: number of wordlengths supported - * @words: wordlengths supported + * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @BRA_flow_controlled: Slave implementation results in an OK_NotReady * response * @simple_ch_prep_sm: If channel prepare sequence is required - * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts + * @num_lanes: array size of @lane_list + * @lane_list: indicates which Lanes can be used by DP0 * * The wordlengths are specified by Spec as max, min AND number of * discrete values, implementation can define based on the wordlengths they * support */ struct sdw_dp0_prop { + u32 *words; u32 max_word; u32 min_word; u32 num_words; - u32 *words; + u32 ch_prep_timeout; bool BRA_flow_controlled; bool simple_ch_prep_sm; - u32 ch_prep_timeout; bool imp_def_interrupts; -}; - -/** - * struct sdw_dpn_audio_mode - Audio mode properties for DPn - * @bus_min_freq: Minimum bus frequency, in Hz - * @bus_max_freq: Maximum bus 
frequency, in Hz - * @bus_num_freq: Number of discrete frequencies supported - * @bus_freq: Discrete bus frequencies, in Hz - * @min_freq: Minimum sampling frequency, in Hz - * @max_freq: Maximum sampling bus frequency, in Hz - * @num_freq: Number of discrete sampling frequency supported - * @freq: Discrete sampling frequencies, in Hz - * @prep_ch_behave: Specifies the dependencies between Channel Prepare - * sequence and bus clock configuration - * If 0, Channel Prepare can happen at any Bus clock rate - * If 1, Channel Prepare sequence shall happen only after Bus clock is - * changed to a frequency supported by this mode or compatible modes - * described by the next field - * @glitchless: Bitmap describing possible glitchless transitions from this - * Audio Mode to other Audio Modes - */ -struct sdw_dpn_audio_mode { - u32 bus_min_freq; - u32 bus_max_freq; - u32 bus_num_freq; - u32 *bus_freq; - u32 max_freq; - u32 min_freq; - u32 num_freq; - u32 *freq; - u32 prep_ch_behave; - u32 glitchless; + int num_lanes; + u32 *lane_list; }; /** @@ -298,24 +278,25 @@ struct sdw_dpn_audio_mode { * @type: Data port type. Full, Simplified or Reduced * @max_grouping: Maximum number of samples that can be grouped together for * a full data port - * @simple_ch_prep_sm: If the port supports simplified channel prepare state - * machine * @ch_prep_timeout: Port-specific timeout value, in milliseconds * @imp_def_interrupts: If set, each bit corresponds to support for * implementation-defined interrupts * @max_ch: Maximum channels supported * @min_ch: Minimum channels supported * @num_channels: Number of discrete channels supported - * @channels: Discrete channels supported * @num_ch_combinations: Number of channel combinations supported + * @channels: Discrete channels supported * @ch_combinations: Channel combinations supported + * @lane_list: indicates which Lanes can be used by DPn + * @num_lanes: array size of @lane_list * @modes: SDW mode supported * @max_async_buffer: Number of samples that this port can buffer in * asynchronous modes + * @port_encoding: Payload Channel Sample encoding schemes supported * @block_pack_mode: Type of block port mode supported * @read_only_wordlength: Read Only wordlength field in DPN_BlockCtrl1 register - * @port_encoding: Payload Channel Sample encoding schemes supported - * @audio_modes: Audio modes supported + * @simple_ch_prep_sm: If the port supports simplified channel prepare state + * machine */ struct sdw_dpn_prop { u32 num; @@ -325,25 +306,29 @@ struct sdw_dpn_prop { u32 *words; enum sdw_dpn_type type; u32 max_grouping; - bool simple_ch_prep_sm; u32 ch_prep_timeout; u32 imp_def_interrupts; u32 max_ch; u32 min_ch; u32 num_channels; - u32 *channels; u32 num_ch_combinations; + u32 *channels; u32 *ch_combinations; + u32 *lane_list; + int num_lanes; u32 modes; u32 max_async_buffer; + u32 port_encoding; bool block_pack_mode; bool read_only_wordlength; - u32 port_encoding; - struct sdw_dpn_audio_mode *audio_modes; + bool simple_ch_prep_sm; }; /** * struct sdw_slave_prop - SoundWire Slave properties + * @dp0_prop: Data Port 0 properties + * @src_dpn_prop: Source Data Port N properties + * @sink_dpn_prop: Sink Data Port N properties * @mipi_revision: Spec version of the implementation * @wake_capable: Wake-up events are supported * @test_mode_capable: If test mode is supported @@ -360,23 +345,26 @@ struct sdw_dpn_prop { * SCP_AddrPage2 * @bank_delay_support: Slave implements bank delay/bridge support registers * SCP_BankDelay and SCP_NextFrame + * @lane_control_support: 
Slave supports lane control * @p15_behave: Slave behavior when the Master attempts a read to the Port15 * alias - * @lane_control_support: Slave supports lane control * @master_count: Number of Masters present on this Slave * @source_ports: Bitmap identifying source ports * @sink_ports: Bitmap identifying sink ports - * @dp0_prop: Data Port 0 properties - * @src_dpn_prop: Source Data Port N properties - * @sink_dpn_prop: Sink Data Port N properties - * @scp_int1_mask: SCP_INT1_MASK desired settings * @quirks: bitmask identifying deltas from the MIPI specification + * @sdca_interrupt_register_list: indicates which sets of SDCA interrupt status + * and masks are supported + * @commit_register_supported: is PCP_Commit register supported + * @scp_int1_mask: SCP_INT1_MASK desired settings * @clock_reg_supported: the Peripheral implements the clock base and scale * registers introduced with the SoundWire 1.2 specification. SDCA devices * do not need to set this boolean property as the registers are required. * @use_domain_irq: call actual IRQ handler on slave, as well as callback */ struct sdw_slave_prop { + struct sdw_dp0_prop *dp0_prop; + struct sdw_dpn_prop *src_dpn_prop; + struct sdw_dpn_prop *sink_dpn_prop; u32 mipi_revision; bool wake_capable; bool test_mode_capable; @@ -388,16 +376,15 @@ struct sdw_slave_prop { bool high_PHY_capable; bool paging_support; bool bank_delay_support; - enum sdw_p15_behave p15_behave; bool lane_control_support; + enum sdw_p15_behave p15_behave; u32 master_count; u32 source_ports; u32 sink_ports; - struct sdw_dp0_prop *dp0_prop; - struct sdw_dpn_prop *src_dpn_prop; - struct sdw_dpn_prop *sink_dpn_prop; - u8 scp_int1_mask; u32 quirks; + u32 sdca_interrupt_register_list; + u8 commit_register_supported; + u8 scp_int1_mask; bool clock_reg_supported; bool use_domain_irq; }; @@ -406,13 +393,14 @@ struct sdw_slave_prop { /** * struct sdw_master_prop - Master properties + * @clk_gears: Clock gears supported + * @clk_freq: Clock frequencies supported, in Hz + * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification * @revision: MIPI spec version of the implementation * @clk_stop_modes: Bitmap, bit N set when clock-stop-modeN supported * @max_clk_freq: Maximum Bus clock frequency, in Hz * @num_clk_gears: Number of clock gears supported - * @clk_gears: Clock gears supported * @num_clk_freq: Number of clock frequencies supported, in Hz - * @clk_freq: Clock frequencies supported, in Hz * @default_frame_rate: Controller default Frame rate, in Hz * @default_row: Number of rows * @default_col: Number of columns @@ -421,24 +409,23 @@ struct sdw_slave_prop { * command * @mclk_freq: clock reference passed to SoundWire Master, in Hz. 
* @hw_disabled: if true, the Master is not functional, typically due to pin-mux - * @quirks: bitmask identifying optional behavior beyond the scope of the MIPI specification */ struct sdw_master_prop { + u32 *clk_gears; + u32 *clk_freq; + u64 quirks; u32 revision; u32 clk_stop_modes; u32 max_clk_freq; u32 num_clk_gears; - u32 *clk_gears; u32 num_clk_freq; - u32 *clk_freq; u32 default_frame_rate; u32 default_row; u32 default_col; - bool dynamic_frame; u32 err_threshold; u32 mclk_freq; + bool dynamic_frame; bool hw_disabled; - u64 quirks; }; /* Definitions for Master quirks */ @@ -488,9 +475,9 @@ struct sdw_slave_id { __u8 sdw_version:4; }; -struct sdw_extended_slave_id { - int link_id; - struct sdw_slave_id id; +struct sdw_peripherals { + int num_peripherals; + struct sdw_slave *array[]; }; /* @@ -630,7 +617,6 @@ struct sdw_slave_ops { int (*clk_stop)(struct sdw_slave *slave, enum sdw_clk_stop_mode mode, enum sdw_clk_stop_type type); - }; /** @@ -663,6 +649,7 @@ struct sdw_slave_ops { * @is_mockup_device: status flag used to squelch errors in the command/control * protocol for SoundWire mockup devices * @sdw_dev_lock: mutex used to protect callbacks/remove races + * @sdca_data: structure containing all device data for SDCA helpers */ struct sdw_slave { struct sdw_slave_id id; @@ -686,6 +673,7 @@ struct sdw_slave { bool first_interrupt_done; bool is_mockup_device; struct mutex sdw_dev_lock; /* protect callbacks/remove races */ + struct sdca_device_data sdca_data; }; #define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) @@ -704,8 +692,7 @@ struct sdw_master_device { container_of(d, struct sdw_master_device, dev) struct sdw_driver { - int (*probe)(struct sdw_slave *sdw, - const struct sdw_device_id *id); + int (*probe)(struct sdw_slave *sdw, const struct sdw_device_id *id); int (*remove)(struct sdw_slave *sdw); void (*shutdown)(struct sdw_slave *sdw); @@ -724,7 +711,7 @@ struct sdw_driver { SDW_SLAVE_ENTRY_EXT((_mfg_id), (_part_id), 0, 0, (_drv_data)) int sdw_handle_slave_status(struct sdw_bus *bus, - enum sdw_slave_status status[]); + enum sdw_slave_status status[]); /* * SDW master structures and APIs @@ -806,29 +793,28 @@ struct sdw_enable_ch { */ struct sdw_master_port_ops { int (*dpn_set_port_params)(struct sdw_bus *bus, - struct sdw_port_params *port_params, - unsigned int bank); + struct sdw_port_params *port_params, + unsigned int bank); int (*dpn_set_port_transport_params)(struct sdw_bus *bus, - struct sdw_transport_params *transport_params, - enum sdw_reg_bank bank); - int (*dpn_port_prep)(struct sdw_bus *bus, - struct sdw_prepare_ch *prepare_ch); + struct sdw_transport_params *transport_params, + enum sdw_reg_bank bank); + int (*dpn_port_prep)(struct sdw_bus *bus, struct sdw_prepare_ch *prepare_ch); int (*dpn_port_enable_ch)(struct sdw_bus *bus, - struct sdw_enable_ch *enable_ch, unsigned int bank); + struct sdw_enable_ch *enable_ch, unsigned int bank); }; struct sdw_msg; /** - * struct sdw_defer - SDW deffered message - * @length: message length + * struct sdw_defer - SDW deferred message * @complete: message completion * @msg: SDW message + * @length: message length */ struct sdw_defer { + struct sdw_msg *msg; int length; struct completion complete; - struct sdw_msg *msg; }; /** @@ -849,14 +835,11 @@ struct sdw_defer { */ struct sdw_master_ops { int (*read_prop)(struct sdw_bus *bus); - u64 (*override_adr) - (struct sdw_bus *bus, u64 addr); - enum sdw_command_response (*xfer_msg) - (struct sdw_bus *bus, struct sdw_msg *msg); - enum sdw_command_response 
(*xfer_msg_defer) - (struct sdw_bus *bus); + u64 (*override_adr)(struct sdw_bus *bus, u64 addr); + enum sdw_command_response (*xfer_msg)(struct sdw_bus *bus, struct sdw_msg *msg); + enum sdw_command_response (*xfer_msg_defer)(struct sdw_bus *bus); int (*set_bus_conf)(struct sdw_bus *bus, - struct sdw_bus_params *params); + struct sdw_bus_params *params); int (*pre_bank_switch)(struct sdw_bus *bus); int (*post_bank_switch)(struct sdw_bus *bus); u32 (*read_ping_status)(struct sdw_bus *bus); @@ -871,68 +854,71 @@ struct sdw_master_ops { * struct sdw_bus - SoundWire bus * @dev: Shortcut to &bus->md->dev to avoid changing the entire code. * @md: Master device - * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used. - * @link_id: Link id number, can be 0 to N, unique for each Controller - * @id: bus system-wide unique id - * @slaves: list of Slaves on this bus - * @assigned: Bitmap for Slave device numbers. - * Bit set implies used number, bit clear implies unused number. + * @bus_lock_key: bus lock key associated with @bus_lock * @bus_lock: bus lock + * @slaves: list of Slaves on this bus + * @msg_lock_key: message lock key associated with @msg_lock * @msg_lock: message lock - * @compute_params: points to Bus resource management implementation - * @ops: Master callback ops - * @port_ops: Master port callback ops - * @params: Current bus parameters - * @prop: Master properties - * @vendor_specific_prop: pointer to non-standard properties * @m_rt_list: List of Master instance of all stream(s) running on Bus. This * is used to compute and program bus bandwidth, clock, frame shape, * transport and port parameters - * @debugfs: Bus debugfs - * @domain: IRQ domain * @defer_msg: Defer message - * @clk_stop_timeout: Clock stop timeout computed - * @bank_switch_timeout: Bank switch timeout computed - * @multi_link: Store bus property that indicates if multi links - * are supported. This flag is populated by drivers after reading - * appropriate firmware (ACPI/DT). + * @params: Current bus parameters + * @stream_refcount: number of streams currently using this bus + * @ops: Master callback ops + * @port_ops: Master port callback ops + * @prop: Master properties + * @vendor_specific_prop: pointer to non-standard properties * @hw_sync_min_links: Number of links used by a stream above which * hardware-based synchronization is required. This value is only * meaningful if multi_link is set. If set to 1, hardware-based * synchronization will be used even if a stream only uses a single * SoundWire segment. - * @stream_refcount: number of streams currently using this bus + * @controller_id: system-unique controller ID. If set to -1, the bus @id will be used. + * @link_id: Link id number, can be 0 to N, unique for each Controller + * @id: bus system-wide unique id + * @compute_params: points to Bus resource management implementation + * @assigned: Bitmap for Slave device numbers. + * Bit set implies used number, bit clear implies unused number. + * @clk_stop_timeout: Clock stop timeout computed + * @bank_switch_timeout: Bank switch timeout computed + * @domain: IRQ domain + * @irq_chip: IRQ chip + * @debugfs: Bus debugfs (optional) + * @multi_link: Store bus property that indicates if multi links + * are supported. This flag is populated by drivers after reading + * appropriate firmware (ACPI/DT).
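 *
 * (Editor's aside, illustration only, not part of the patch: @multi_link and
 * @hw_sync_min_links together gate hardware-based bank-switch
 * synchronization, roughly as
 *
 *	multi_link = bus->multi_link && (m_rt_count >= bus->hw_sync_min_links);
 *
 * cf. do_bank_switch() in drivers/soundwire/stream.c.)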
*/ struct sdw_bus { struct device *dev; struct sdw_master_device *md; - int controller_id; - unsigned int link_id; - int id; - struct list_head slaves; - DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); - struct mutex bus_lock; struct lock_class_key bus_lock_key; - struct mutex msg_lock; + struct mutex bus_lock; + struct list_head slaves; struct lock_class_key msg_lock_key; - int (*compute_params)(struct sdw_bus *bus); + struct mutex msg_lock; + struct list_head m_rt_list; + struct sdw_defer defer_msg; + struct sdw_bus_params params; + int stream_refcount; const struct sdw_master_ops *ops; const struct sdw_master_port_ops *port_ops; - struct sdw_bus_params params; struct sdw_master_prop prop; void *vendor_specific_prop; - struct list_head m_rt_list; + int hw_sync_min_links; + int controller_id; + unsigned int link_id; + int id; + int (*compute_params)(struct sdw_bus *bus); + DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); + unsigned int clk_stop_timeout; + u32 bank_switch_timeout; + struct irq_chip irq_chip; + struct irq_domain *domain; #ifdef CONFIG_DEBUG_FS struct dentry *debugfs; #endif - struct irq_chip irq_chip; - struct irq_domain *domain; - struct sdw_defer defer_msg; - unsigned int clk_stop_timeout; - u32 bank_switch_timeout; bool multi_link; - int hw_sync_min_links; - int stream_refcount; }; int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent, @@ -1010,18 +996,18 @@ struct sdw_stream_params { * @params: Stream parameters * @state: Current state of the stream * @type: Stream type PCM or PDM + * @m_rt_count: Count of Master runtime(s) in this stream * @master_list: List of Master runtime(s) in this stream. * master_list can contain only one m_rt per Master instance * for a stream - * @m_rt_count: Count of Master runtime(s) in this stream */ struct sdw_stream_runtime { const char *name; struct sdw_stream_params params; enum sdw_stream_state state; enum sdw_stream_type type; - struct list_head master_list; int m_rt_count; + struct list_head master_list; }; struct sdw_stream_runtime *sdw_alloc_stream(const char *stream_name); @@ -1030,12 +1016,12 @@ void sdw_release_stream(struct sdw_stream_runtime *stream); int sdw_compute_params(struct sdw_bus *bus); int sdw_stream_add_master(struct sdw_bus *bus, - struct sdw_stream_config *stream_config, - const struct sdw_port_config *port_config, - unsigned int num_ports, - struct sdw_stream_runtime *stream); + struct sdw_stream_config *stream_config, + const struct sdw_port_config *port_config, + unsigned int num_ports, + struct sdw_stream_runtime *stream); int sdw_stream_remove_master(struct sdw_bus *bus, - struct sdw_stream_runtime *stream); + struct sdw_stream_runtime *stream); int sdw_startup_stream(void *sdw_substream); int sdw_prepare_stream(struct sdw_stream_runtime *stream); int sdw_enable_stream(struct sdw_stream_runtime *stream); diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h index 28a4eb77717f..799f8578137b 100644 --- a/include/linux/soundwire/sdw_amd.h +++ b/include/linux/soundwire/sdw_amd.h @@ -27,9 +27,11 @@ #define ACP_SDW0 0 #define ACP_SDW1 1 #define AMD_SDW_MAX_MANAGER_COUNT 2 +#define ACP63_PCI_REV_ID 0x63 struct acp_sdw_pdata { u16 instance; + u32 acp_rev; /* mutex to protect acp common register access */ struct mutex *acp_sdw_lock; }; @@ -66,6 +68,7 @@ struct sdw_amd_dai_runtime { * @instance: SoundWire manager instance * @quirks: SoundWire manager quirks * @wake_en_mask: wake enable mask per SoundWire manager + * @acp_rev: acp pci device revision id * @clk_stopped: flag set to true when 
clock is stopped * @power_mode_mask: flag interprets amd SoundWire manager power mode * @dai_runtime_array: dai runtime array @@ -94,6 +97,7 @@ struct amd_sdw_manager { u32 quirks; u32 wake_en_mask; u32 power_mode_mask; + u32 acp_rev; bool clk_stopped; struct sdw_amd_dai_runtime **dai_runtime_array; @@ -115,25 +119,23 @@ struct sdw_amd_acpi_info { * struct sdw_amd_ctx - context allocated by the controller driver probe * * @count: link count - * @num_slaves: total number of devices exposed across all enabled links * @link_mask: bit-wise mask listing SoundWire links reported by the * Controller - * @ids: array of slave_id, representing Slaves exposed across all enabled - * links * @pdev: platform device structure + * @peripherals: array representing Peripherals exposed across all enabled links */ struct sdw_amd_ctx { int count; - int num_slaves; u32 link_mask; - struct sdw_extended_slave_id *ids; struct platform_device *pdev[AMD_SDW_MAX_MANAGER_COUNT]; + struct sdw_peripherals *peripherals; }; /** * struct sdw_amd_res - Soundwire AMD global resource structure, * typically populated by the DSP driver/Legacy driver * + * @acp_rev: acp pci device revision id * @addr: acp pci device resource start address * @reg_range: ACP register range * @link_mask: bit-wise mask listing links selected by the DSP driver/ @@ -146,6 +148,7 @@ struct sdw_amd_ctx { * @acp_lock: mutex protecting acp common registers access */ struct sdw_amd_res { + u32 acp_rev; u32 addr; u32 reg_range; u32 link_mask; diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 734dc1fa3b5b..580086417e4b 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -4,6 +4,7 @@ #ifndef __SDW_INTEL_H #define __SDW_INTEL_H +#include <linux/acpi.h> #include <linux/irqreturn.h> #include <linux/soundwire/sdw.h> @@ -286,31 +287,28 @@ struct hdac_bus; * hardware capabilities after all power dependencies are settled. * @link_mask: bit-wise mask listing SoundWire links reported by the * Controller - * @num_slaves: total number of devices exposed across all enabled links * @handle: ACPI parent handle * @ldev: information for each link (controller-specific and kept * opaque here) - * @ids: array of slave_id, representing Slaves exposed across all enabled - * links * @link_list: list to handle interrupts across all links * @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers. * @shim_mask: flags to track initialization of SHIM shared registers * @shim_base: sdw shim base. * @alh_base: sdw alh base. 
+ * @peripherals: array representing Peripherals exposed across all enabled links */ struct sdw_intel_ctx { int count; void __iomem *mmio_base; u32 link_mask; - int num_slaves; acpi_handle handle; struct sdw_intel_link_dev **ldev; - struct sdw_extended_slave_id *ids; struct list_head link_list; struct mutex shim_lock; /* lock for access to shared SHIM registers */ u32 shim_mask; u32 shim_base; u32 alh_base; + struct sdw_peripherals *peripherals; }; /** diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index f866d5c8ed32..c46d2b8029be 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -90,6 +90,8 @@ enum spi_mem_data_dir { * @data.buswidth: number of IO lanes used to send/receive the data * @data.dtr: whether the data should be sent in DTR mode or not * @data.ecc: whether error correction is required or not + * @data.swap16: whether the byte order of 16-bit words is swapped when read + * or written in Octal DTR mode compared to STR mode. * @data.dir: direction of the transfer * @data.nbytes: number of data bytes to send/receive. Can be zero if the * operation does not involve transferring data @@ -124,7 +126,8 @@ struct spi_mem_op { u8 buswidth; u8 dtr : 1; u8 ecc : 1; - u8 __pad : 6; + u8 swap16 : 1; + u8 __pad : 5; enum spi_mem_data_dir dir; unsigned int nbytes; union { @@ -297,10 +300,13 @@ struct spi_controller_mem_ops { * struct spi_controller_mem_caps - SPI memory controller capabilities * @dtr: Supports DTR operations * @ecc: Supports operations with error correction + * @swap16: Supports swapping bytes on a 16 bit boundary when configured in + * Octal DTR */ struct spi_controller_mem_caps { bool dtr; bool ecc; + bool swap16; }; #define spi_mem_controller_is_capable(ctlr, cap) \ diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 4b95663163e0..8497f4747e24 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -824,21 +824,6 @@ void spi_take_timestamp_post(struct spi_controller *ctlr, extern struct spi_controller *__spi_alloc_controller(struct device *host, unsigned int size, bool slave); -static inline struct spi_controller *spi_alloc_master(struct device *host, - unsigned int size) -{ - return __spi_alloc_controller(host, size, false); -} - -static inline struct spi_controller *spi_alloc_slave(struct device *host, - unsigned int size) -{ - if (!IS_ENABLED(CONFIG_SPI_SLAVE)) - return NULL; - - return __spi_alloc_controller(host, size, true); -} - static inline struct spi_controller *spi_alloc_host(struct device *dev, unsigned int size) { @@ -858,21 +843,6 @@ struct spi_controller *__devm_spi_alloc_controller(struct device *dev, unsigned int size, bool slave); -static inline struct spi_controller *devm_spi_alloc_master(struct device *dev, - unsigned int size) -{ - return __devm_spi_alloc_controller(dev, size, false); -} - -static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev, - unsigned int size) -{ - if (!IS_ENABLED(CONFIG_SPI_SLAVE)) - return NULL; - - return __devm_spi_alloc_controller(dev, size, true); -} - static inline struct spi_controller *devm_spi_alloc_host(struct device *dev, unsigned int size) { diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 89eb6f4c659c..9ecb0ab504e3 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -2,7 +2,7 @@ #define __LINUX_SPINLOCK_API_SMP_H #ifndef __LINUX_INSIDE_SPINLOCK_H -# error "please don't include this file directly" +# error "Please do not include this file 
directly." #endif /* diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h index 61c49b16f69a..f6499c37157d 100644 --- a/include/linux/spinlock_rt.h +++ b/include/linux/spinlock_rt.h @@ -16,26 +16,25 @@ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name, } #endif -#define spin_lock_init(slock) \ +#define __spin_lock_init(slock, name, key, percpu) \ do { \ - static struct lock_class_key __key; \ - \ rt_mutex_base_init(&(slock)->lock); \ - __rt_spin_lock_init(slock, #slock, &__key, false); \ + __rt_spin_lock_init(slock, name, key, percpu); \ } while (0) -#define local_spin_lock_init(slock) \ +#define _spin_lock_init(slock, percpu) \ do { \ static struct lock_class_key __key; \ - \ - rt_mutex_base_init(&(slock)->lock); \ - __rt_spin_lock_init(slock, #slock, &__key, true); \ + __spin_lock_init(slock, #slock, &__key, percpu); \ } while (0) -extern void rt_spin_lock(spinlock_t *lock); -extern void rt_spin_lock_nested(spinlock_t *lock, int subclass); -extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock); -extern void rt_spin_unlock(spinlock_t *lock); +#define spin_lock_init(slock) _spin_lock_init(slock, false) +#define local_spin_lock_init(slock) _spin_lock_init(slock, true) + +extern void rt_spin_lock(spinlock_t *lock) __acquires(lock); +extern void rt_spin_lock_nested(spinlock_t *lock, int subclass) __acquires(lock); +extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) __acquires(lock); +extern void rt_spin_unlock(spinlock_t *lock) __releases(lock); extern void rt_spin_lock_unlock(spinlock_t *lock); extern int rt_spin_trylock_bh(spinlock_t *lock); extern int rt_spin_trylock(spinlock_t *lock); @@ -132,7 +131,7 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, #define spin_trylock_irq(lock) \ __cond_lock(lock, rt_spin_trylock(lock)) -#define __spin_trylock_irqsave(lock, flags) \ +#define spin_trylock_irqsave(lock, flags) \ ({ \ int __locked; \ \ @@ -142,9 +141,6 @@ static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, __locked; \ }) -#define spin_trylock_irqsave(lock, flags) \ - __cond_lock(lock, __spin_trylock_irqsave(lock, flags)) - #define spin_is_contended(lock) (((void)(lock), 0)) static inline int spin_is_locked(spinlock_t *lock) diff --git a/include/linux/spinlock_types_up.h b/include/linux/spinlock_types_up.h index 7f86a2016ac5..fc4e2d017c20 100644 --- a/include/linux/spinlock_types_up.h +++ b/include/linux/spinlock_types_up.h @@ -2,7 +2,7 @@ #define __LINUX_SPINLOCK_TYPES_UP_H #ifndef __LINUX_SPINLOCK_TYPES_RAW_H -# error "please don't include this file directly" +# error "Please do not include this file directly." #endif /* diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index c87204247592..1e84e71ca495 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -2,7 +2,7 @@ #define __LINUX_SPINLOCK_UP_H #ifndef __LINUX_INSIDE_SPINLOCK_H -# error "please don't include this file directly" +# error "Please do not include this file directly." 
#endif #include <asm/processor.h> /* for cpu_relax() */ diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 835bbb2d1f88..08339eb8a01c 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -56,6 +56,13 @@ void call_srcu(struct srcu_struct *ssp, struct rcu_head *head, void cleanup_srcu_struct(struct srcu_struct *ssp); int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); +#ifdef CONFIG_TINY_SRCU +#define __srcu_read_lock_lite __srcu_read_lock +#define __srcu_read_unlock_lite __srcu_read_unlock +#else // #ifdef CONFIG_TINY_SRCU +int __srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp); +void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) __releases(ssp); +#endif // #else // #ifdef CONFIG_TINY_SRCU void synchronize_srcu(struct srcu_struct *ssp); #define SRCU_GET_STATE_COMPLETED 0x1 @@ -176,17 +183,6 @@ static inline int srcu_read_lock_held(const struct srcu_struct *ssp) #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -#define SRCU_NMI_UNKNOWN 0x0 -#define SRCU_NMI_UNSAFE 0x1 -#define SRCU_NMI_SAFE 0x2 - -#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_TREE_SRCU) -void srcu_check_nmi_safety(struct srcu_struct *ssp, bool nmi_safe); -#else -static inline void srcu_check_nmi_safety(struct srcu_struct *ssp, - bool nmi_safe) { } -#endif - /** * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing @@ -236,33 +232,67 @@ static inline void srcu_check_nmi_safety(struct srcu_struct *ssp, * a mutex that is held elsewhere while calling synchronize_srcu() or * synchronize_srcu_expedited(). * - * Note that srcu_read_lock() and the matching srcu_read_unlock() must - * occur in the same context, for example, it is illegal to invoke - * srcu_read_unlock() in an irq handler if the matching srcu_read_lock() - * was invoked in process context. + * The return value from srcu_read_lock() must be passed unaltered + * to the matching srcu_read_unlock(). Note that srcu_read_lock() and + * the matching srcu_read_unlock() must occur in the same context, for + * example, it is illegal to invoke srcu_read_unlock() in an irq handler + * if the matching srcu_read_lock() was invoked in process context. Or, + * for that matter to invoke srcu_read_unlock() from one task and the + * matching srcu_read_lock() from another. */ static inline int srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp) { int retval; - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); retval = __srcu_read_lock(ssp); srcu_lock_acquire(&ssp->dep_map); return retval; } /** + * srcu_read_lock_lite - register a new reader for an SRCU-protected structure. + * @ssp: srcu_struct in which to register the new reader. + * + * Enter an SRCU read-side critical section, but for a light-weight + * smp_mb()-free reader. See srcu_read_lock() for more information. + * + * If srcu_read_lock_lite() is ever used on an srcu_struct structure, + * then none of the other flavors may be used, whether before, during, + * or after. Note that grace-period auto-expediting is disabled for _lite + * srcu_struct structures because auto-expedited grace periods invoke + * synchronize_rcu_expedited(), IPIs and all. + * + * Note that srcu_read_lock_lite() can be invoked only from those contexts + * where RCU is watching, that is, from contexts where it would be legal + * to invoke rcu_read_lock(). Otherwise, lockdep will complain. 
+ */ +static inline int srcu_read_lock_lite(struct srcu_struct *ssp) __acquires(ssp) +{ + int retval; + + srcu_check_read_flavor_lite(ssp); + retval = __srcu_read_lock_lite(ssp); + rcu_try_lock_acquire(&ssp->dep_map); + return retval; +} + +/** * srcu_read_lock_nmisafe - register a new reader for an SRCU-protected structure. * @ssp: srcu_struct in which to register the new reader. * * Enter an SRCU read-side critical section, but in an NMI-safe manner. * See srcu_read_lock() for more information. + * + * If srcu_read_lock_nmisafe() is ever used on an srcu_struct structure, + * then none of the other flavors may be used, whether before, during, + * or after. */ static inline int srcu_read_lock_nmisafe(struct srcu_struct *ssp) __acquires(ssp) { int retval; - srcu_check_nmi_safety(ssp, true); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI); retval = __srcu_read_lock_nmisafe(ssp); rcu_try_lock_acquire(&ssp->dep_map); return retval; @@ -274,7 +304,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) { int retval; - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); retval = __srcu_read_lock(ssp); return retval; } @@ -303,7 +333,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) static inline int srcu_down_read(struct srcu_struct *ssp) __acquires(ssp) { WARN_ON_ONCE(in_nmi()); - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); return __srcu_read_lock(ssp); } @@ -318,12 +348,28 @@ static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp) { WARN_ON_ONCE(idx & ~0x1); - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); srcu_lock_release(&ssp->dep_map); __srcu_read_unlock(ssp, idx); } /** + * srcu_read_unlock_lite - unregister an old reader from an SRCU-protected structure. + * @ssp: srcu_struct in which to unregister the old reader. + * @idx: return value from corresponding srcu_read_lock_lite(). + * + * Exit a light-weight SRCU read-side critical section. + */ +static inline void srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) + __releases(ssp) +{ + WARN_ON_ONCE(idx & ~0x1); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE); + srcu_lock_release(&ssp->dep_map); + __srcu_read_unlock_lite(ssp, idx); +} + +/** * srcu_read_unlock_nmisafe - unregister a old reader from an SRCU-protected structure. * @ssp: srcu_struct in which to unregister the old reader. * @idx: return value from corresponding srcu_read_lock().
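[ Editor's illustration, not part of the patch: a minimal reader pairing for
  the new _lite flavor. The my_srcu/gp/consume() names are hypothetical; a
  given srcu_struct must then use the _lite flavor exclusively, and only from
  contexts where RCU is watching:

	DEFINE_SRCU(my_srcu);
	struct foo __rcu *gp;			/* hypothetical shared pointer */

	static void reader(void)
	{
		struct foo *p;
		int idx;

		idx = srcu_read_lock_lite(&my_srcu);
		p = srcu_dereference(gp, &my_srcu);
		if (p)
			consume(p);		/* hypothetical consumer */
		srcu_read_unlock_lite(&my_srcu, idx);
	}
]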
@@ -334,7 +380,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) __releases(ssp) { WARN_ON_ONCE(idx & ~0x1); - srcu_check_nmi_safety(ssp, true); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NMI); rcu_lock_release(&ssp->dep_map); __srcu_read_unlock_nmisafe(ssp, idx); } @@ -343,7 +389,7 @@ static inline void srcu_read_unlock_nmisafe(struct srcu_struct *ssp, int idx) static inline notrace void srcu_read_unlock_notrace(struct srcu_struct *ssp, int idx) __releases(ssp) { - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); __srcu_read_unlock(ssp, idx); } @@ -360,7 +406,7 @@ static inline void srcu_up_read(struct srcu_struct *ssp, int idx) { WARN_ON_ONCE(idx & ~0x1); WARN_ON_ONCE(in_nmi()); - srcu_check_nmi_safety(ssp, false); + srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_NORMAL); __srcu_read_unlock(ssp, idx); } diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 4d96bbdb45f0..1321da803274 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -81,6 +81,9 @@ static inline void srcu_barrier(struct srcu_struct *ssp) synchronize_srcu(ssp); } +#define srcu_check_read_flavor(ssp, read_flavor) do { } while (0) +#define srcu_check_read_flavor_lite(ssp) do { } while (0) + /* Defined here to avoid size increase for non-torture kernels. */ static inline void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf) diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index ed57598394de..490aeecc6bb4 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -25,7 +25,7 @@ struct srcu_data { /* Read-side state. */ atomic_long_t srcu_lock_count[2]; /* Locks per CPU. */ atomic_long_t srcu_unlock_count[2]; /* Unlocks per CPU. */ - int srcu_nmi_safety; /* NMI-safe srcu_struct structure? */ + int srcu_reader_flavor; /* Reader flavor for srcu_struct structure? */ /* Update-side state. */ spinlock_t __private lock ____cacheline_internodealigned_in_smp; @@ -43,6 +43,11 @@ struct srcu_data { struct srcu_struct *ssp; }; +/* Values for ->srcu_reader_flavor. */ +#define SRCU_READ_FLAVOR_NORMAL 0x1 // srcu_read_lock(). +#define SRCU_READ_FLAVOR_NMI 0x2 // srcu_read_lock_nmisafe(). +#define SRCU_READ_FLAVOR_LITE 0x4 // srcu_read_lock_lite(). + /* * Node in SRCU combining tree, similar in function to rcu_data. */ @@ -204,4 +209,64 @@ void synchronize_srcu_expedited(struct srcu_struct *ssp); void srcu_barrier(struct srcu_struct *ssp); void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf); +/* + * Counts the new reader in the appropriate per-CPU element of the + * srcu_struct. Returns an index that must be passed to the matching + * srcu_read_unlock_lite(). + * + * Note that this_cpu_inc() is an RCU read-side critical section either + * because it disables interrupts, because it is a single instruction, + * or because it is a read-modify-write atomic operation, depending on + * the whims of the architecture. + */ +static inline int __srcu_read_lock_lite(struct srcu_struct *ssp) +{ + int idx; + + RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_lock_lite()."); + idx = READ_ONCE(ssp->srcu_idx) & 0x1; + this_cpu_inc(ssp->sda->srcu_lock_count[idx].counter); /* Y */ + barrier(); /* Avoid leaking the critical section. */ + return idx; +} + +/* + * Removes the count for the old reader from the appropriate + * per-CPU element of the srcu_struct. 
Note that this may well be a + * different CPU than that which was incremented by the corresponding + * srcu_read_lock_lite(), but it must be within the same task. + * + * Note that this_cpu_inc() is an RCU read-side critical section either + * because it disables interrupts, because it is a single instruction, + * or because it is a read-modify-write atomic operation, depending on + * the whims of the architecture. + */ +static inline void __srcu_read_unlock_lite(struct srcu_struct *ssp, int idx) +{ + barrier(); /* Avoid leaking the critical section. */ + this_cpu_inc(ssp->sda->srcu_unlock_count[idx].counter); /* Z */ + RCU_LOCKDEP_WARN(!rcu_is_watching(), "RCU must be watching srcu_read_unlock_lite()."); +} + +void __srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor); + +// Record _lite() usage even for CONFIG_PROVE_RCU=n kernels. +static inline void srcu_check_read_flavor_lite(struct srcu_struct *ssp) +{ + struct srcu_data *sdp = raw_cpu_ptr(ssp->sda); + + if (likely(READ_ONCE(sdp->srcu_reader_flavor) & SRCU_READ_FLAVOR_LITE)) + return; + + // Note that the cmpxchg() in srcu_check_read_flavor() is fully ordered. + __srcu_check_read_flavor(ssp, SRCU_READ_FLAVOR_LITE); +} + +// Record non-_lite() usage only for CONFIG_PROVE_RCU=y kernels. +static inline void srcu_check_read_flavor(struct srcu_struct *ssp, int read_flavor) +{ + if (IS_ENABLED(CONFIG_PROVE_RCU)) + __srcu_check_read_flavor(ssp, read_flavor); +} + #endif diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 5f775e104f9a..a2ab813a9800 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -681,6 +681,27 @@ xdr_stream_decode_u32(struct xdr_stream *xdr, __u32 *ptr) } /** + * xdr_stream_decode_be32 - Decode a big-endian 32-bit integer + * @xdr: pointer to xdr_stream + * @ptr: location to store integer + * + * Return values: + * %0 on success + * %-EBADMSG on XDR buffer overflow + */ +static inline ssize_t +xdr_stream_decode_be32(struct xdr_stream *xdr, __be32 *ptr) +{ + const size_t count = sizeof(*ptr); + __be32 *p = xdr_inline_decode(xdr, count); + + if (unlikely(!p)) + return -EBADMSG; + *ptr = *p; + return 0; +} + +/** * xdr_stream_decode_u64 - Decode a 64-bit integer * @xdr: pointer to xdr_stream * @ptr: location to store 64-bit integer diff --git a/include/linux/sunrpc/xdrgen/_defs.h b/include/linux/sunrpc/xdrgen/_defs.h index be9e62371758..20c7270aa64d 100644 --- a/include/linux/sunrpc/xdrgen/_defs.h +++ b/include/linux/sunrpc/xdrgen/_defs.h @@ -23,4 +23,13 @@ typedef struct { u8 *data; } opaque; +#define XDR_void (0) +#define XDR_bool (1) +#define XDR_int (1) +#define XDR_unsigned_int (1) +#define XDR_long (1) +#define XDR_unsigned_long (1) +#define XDR_hyper (2) +#define XDR_unsigned_hyper (2) + #endif /* _SUNRPC_XDRGEN__DEFS_H_ */ diff --git a/include/linux/swapops.h b/include/linux/swapops.h index cb468e418ea1..96f26e29fefe 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -426,9 +426,19 @@ typedef unsigned long pte_marker; * "Poisoned" here is meant in the very general sense of "future accesses are * invalid", instead of referring very specifically to hardware memory errors. * This marker is meant to represent any of various different causes of this. + * + * Note that, when encountered by the faulting logic, PTEs with this marker will + * result in VM_FAULT_HWPOISON and thus regardless trigger hardware memory error + * logic. 
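 *
 * (Editor's illustration, not part of the patch: the fault path maps this
 * marker straight to a poison result, roughly
 *
 *	if (marker & PTE_MARKER_POISONED)
 *		return VM_FAULT_HWPOISON;
 *
 * cf. handle_pte_marker() in mm/memory.c.)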
*/ #define PTE_MARKER_POISONED BIT(1) -#define PTE_MARKER_MASK (BIT(2) - 1) +/* + * Indicates that, on fault, this PTE will cause a SIGSEGV signal to be + * sent. This means guard markers behave in effect as if the region were mapped + * PROT_NONE, rather than if they were a memory hole or equivalent. + */ +#define PTE_MARKER_GUARD BIT(2) +#define PTE_MARKER_MASK (BIT(3) - 1) static inline swp_entry_t make_pte_marker_entry(pte_marker marker) { @@ -464,6 +474,18 @@ static inline int is_poisoned_swp_entry(swp_entry_t entry) { return is_pte_marker_entry(entry) && (pte_marker_get(entry) & PTE_MARKER_POISONED); + +} + +static inline swp_entry_t make_guard_swp_entry(void) +{ + return make_pte_marker_entry(PTE_MARKER_GUARD); +} + +static inline int is_guard_swp_entry(swp_entry_t entry) +{ + return is_pte_marker_entry(entry) && + (pte_marker_get(entry) & PTE_MARKER_GUARD); } /* diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 5758104921e6..c6333204d451 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -77,6 +77,7 @@ struct cachestat_range; struct cachestat; struct statmount; struct mnt_id_req; +struct xattr_args; #include <linux/types.h> #include <linux/aio_abi.h> @@ -338,23 +339,35 @@ asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op, void __user *arg, unsigned int nr_args); asmlinkage long sys_setxattr(const char __user *path, const char __user *name, const void __user *value, size_t size, int flags); +asmlinkage long sys_setxattrat(int dfd, const char __user *path, unsigned int at_flags, + const char __user *name, + const struct xattr_args __user *args, size_t size); asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name, const void __user *value, size_t size, int flags); asmlinkage long sys_fsetxattr(int fd, const char __user *name, const void __user *value, size_t size, int flags); asmlinkage long sys_getxattr(const char __user *path, const char __user *name, void __user *value, size_t size); +asmlinkage long sys_getxattrat(int dfd, const char __user *path, unsigned int at_flags, + const char __user *name, + struct xattr_args __user *args, size_t size); asmlinkage long sys_lgetxattr(const char __user *path, const char __user *name, void __user *value, size_t size); asmlinkage long sys_fgetxattr(int fd, const char __user *name, void __user *value, size_t size); asmlinkage long sys_listxattr(const char __user *path, char __user *list, size_t size); +asmlinkage long sys_listxattrat(int dfd, const char __user *path, + unsigned int at_flags, + char __user *list, size_t size); asmlinkage long sys_llistxattr(const char __user *path, char __user *list, size_t size); asmlinkage long sys_flistxattr(int fd, char __user *list, size_t size); asmlinkage long sys_removexattr(const char __user *path, const char __user *name); +asmlinkage long sys_removexattrat(int dfd, const char __user *path, + unsigned int at_flags, + const char __user *name); asmlinkage long sys_lremovexattr(const char __user *path, const char __user *name); asmlinkage long sys_fremovexattr(int fd, const char __user *name); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index aa4c6d44aaa0..40a6ac6c9713 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -90,9 +90,7 @@ int proc_do_static_key(const struct ctl_table *table, int write, void *buffer, /* * Register a set of sysctl names by calling register_sysctl - * with an initialised array of struct ctl_table's. An entry with - NULL procname terminates the table.
table->de will be - * set up by the registration and need not be initialised in advance. + * with an initialised array of struct ctl_table's. * * sysctl names can be mirrored automatically under /proc/sys. The * procname supplied controls /proc naming. @@ -133,7 +131,7 @@ static inline void *proc_sys_poll_event(struct ctl_table_poll *poll) /* A sysctl table is an array of struct ctl_table: */ struct ctl_table { - const char *procname; /* Text ID for /proc/sys, or zero */ + const char *procname; /* Text ID for /proc/sys */ void *data; int maxlen; umode_t mode; @@ -162,7 +160,7 @@ struct ctl_node { struct ctl_table_header { union { struct { - struct ctl_table *ctl_table; + const struct ctl_table *ctl_table; int ctl_table_size; int used; int count; @@ -223,13 +221,13 @@ extern void retire_sysctl_set(struct ctl_table_set *set); struct ctl_table_header *__register_sysctl_table( struct ctl_table_set *set, - const char *path, struct ctl_table *table, size_t table_size); -struct ctl_table_header *register_sysctl_sz(const char *path, struct ctl_table *table, + const char *path, const struct ctl_table *table, size_t table_size); +struct ctl_table_header *register_sysctl_sz(const char *path, const struct ctl_table *table, size_t table_size); void unregister_sysctl_table(struct ctl_table_header * table); extern int sysctl_init_bases(void); -extern void __register_sysctl_init(const char *path, struct ctl_table *table, +extern void __register_sysctl_init(const char *path, const struct ctl_table *table, const char *table_name, size_t table_size); #define register_sysctl_init(path, table) \ __register_sysctl_init(path, table, #table, ARRAY_SIZE(table)) @@ -251,7 +249,7 @@ extern int no_unaligned_warning; #else /* CONFIG_SYSCTL */ -static inline void register_sysctl_init(const char *path, struct ctl_table *table) +static inline void register_sysctl_init(const char *path, const struct ctl_table *table) { } @@ -261,7 +259,7 @@ static inline struct ctl_table_header *register_sysctl_mount_point(const char *p } static inline struct ctl_table_header *register_sysctl_sz(const char *path, - struct ctl_table *table, + const struct ctl_table *table, size_t table_size) { return NULL; diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h index bef5f06a91de..07cbab516942 100644 --- a/include/linux/sysfb.h +++ b/include/linux/sysfb.h @@ -60,12 +60,19 @@ struct efifb_dmi_info { void sysfb_disable(struct device *dev); +bool sysfb_handles_screen_info(void); + #else /* CONFIG_SYSFB */ static inline void sysfb_disable(struct device *dev) { } +static inline bool sysfb_handles_screen_info(void) +{ + return false; +} + #endif /* CONFIG_SYSFB */ #ifdef CONFIG_EFI diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index c4e64dc11206..0f2fcd244523 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -87,6 +87,11 @@ do { \ * SYSFS_GROUP_VISIBLE() when assigning this callback to * specify separate _group_visible() and _attr_visible() * handlers. + * @bin_size: + * Optional: Function to return the size of a binary attribute + * of the group. Will be called repeatedly for each binary + * attribute in the group. Overwrites the size field embedded + * inside the attribute itself. * @attrs: Pointer to NULL terminated list of attributes. * @bin_attrs: Pointer to NULL terminated list of binary attributes. * Either attrs or bin_attrs or both must be provided. 
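[ Editor's sketch, not part of the patch: with the @bin_size callback
  documented above, a group can report a per-device size for a binary
  attribute instead of a static .size. All foo_* names are hypothetical:

	static size_t foo_bin_size(struct kobject *kobj,
				   const struct bin_attribute *attr, int n)
	{
		return foo_blob_len(kobj);	/* hypothetical helper */
	}

	static const struct bin_attribute *const foo_bin_attrs[] = {
		&bin_attr_blob,			/* hypothetical attribute */
		NULL,
	};

	static const struct attribute_group foo_group = {
		.bin_attrs_new	= foo_bin_attrs,
		.bin_size	= foo_bin_size,
	};
]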
@@ -96,9 +101,15 @@ struct attribute_group { umode_t (*is_visible)(struct kobject *, struct attribute *, int); umode_t (*is_bin_visible)(struct kobject *, - struct bin_attribute *, int); + const struct bin_attribute *, int); + size_t (*bin_size)(struct kobject *, + const struct bin_attribute *, + int); struct attribute **attrs; - struct bin_attribute **bin_attrs; + union { + struct bin_attribute **bin_attrs; + const struct bin_attribute *const *bin_attrs_new; + }; }; #define SYSFS_PREALLOC 010000 @@ -191,22 +202,22 @@ struct attribute_group { * attributes, the group visibility is determined by the function * specified to is_visible() not is_bin_visible() */ -#define DEFINE_SYSFS_BIN_GROUP_VISIBLE(name) \ - static inline umode_t sysfs_group_visible_##name( \ - struct kobject *kobj, struct bin_attribute *attr, int n) \ - { \ - if (n == 0 && !name##_group_visible(kobj)) \ - return SYSFS_GROUP_INVISIBLE; \ - return name##_attr_visible(kobj, attr, n); \ +#define DEFINE_SYSFS_BIN_GROUP_VISIBLE(name) \ + static inline umode_t sysfs_group_visible_##name( \ + struct kobject *kobj, const struct bin_attribute *attr, int n) \ + { \ + if (n == 0 && !name##_group_visible(kobj)) \ + return SYSFS_GROUP_INVISIBLE; \ + return name##_attr_visible(kobj, attr, n); \ } -#define DEFINE_SIMPLE_SYSFS_BIN_GROUP_VISIBLE(name) \ - static inline umode_t sysfs_group_visible_##name( \ - struct kobject *kobj, struct bin_attribute *a, int n) \ - { \ - if (n == 0 && !name##_group_visible(kobj)) \ - return SYSFS_GROUP_INVISIBLE; \ - return a->mode; \ +#define DEFINE_SIMPLE_SYSFS_BIN_GROUP_VISIBLE(name) \ + static inline umode_t sysfs_group_visible_##name( \ + struct kobject *kobj, const struct bin_attribute *a, int n) \ + { \ + if (n == 0 && !name##_group_visible(kobj)) \ + return SYSFS_GROUP_INVISIBLE; \ + return a->mode; \ } #define SYSFS_GROUP_VISIBLE(fn) sysfs_group_visible_##fn @@ -297,11 +308,15 @@ struct bin_attribute { struct address_space *(*f_mapping)(void); ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); + ssize_t (*read_new)(struct file *, struct kobject *, const struct bin_attribute *, + char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); - loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *, + ssize_t (*write_new)(struct file *, struct kobject *, + const struct bin_attribute *, char *, loff_t, size_t); + loff_t (*llseek)(struct file *, struct kobject *, const struct bin_attribute *, loff_t, int); - int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr, + int (*mmap)(struct file *, struct kobject *, const struct bin_attribute *attr, struct vm_area_struct *vma); }; @@ -317,25 +332,36 @@ struct bin_attribute { */ #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr) +typedef ssize_t __sysfs_bin_rw_handler_new(struct file *, struct kobject *, + const struct bin_attribute *, char *, loff_t, size_t); + /* macros to create static binary attributes easier */ #define __BIN_ATTR(_name, _mode, _read, _write, _size) { \ .attr = { .name = __stringify(_name), .mode = _mode }, \ - .read = _read, \ - .write = _write, \ + .read = _Generic(_read, \ + __sysfs_bin_rw_handler_new * : NULL, \ + default : _read \ + ), \ + .read_new = _Generic(_read, \ + __sysfs_bin_rw_handler_new * : _read, \ + default : NULL \ + ), \ + .write = _Generic(_write, \ + __sysfs_bin_rw_handler_new * : NULL, \ + default : _write \ + ), \ + .write_new = _Generic(_write, \ + 
__sysfs_bin_rw_handler_new * : _write, \ + default : NULL \ + ), \ .size = _size, \ } -#define __BIN_ATTR_RO(_name, _size) { \ - .attr = { .name = __stringify(_name), .mode = 0444 }, \ - .read = _name##_read, \ - .size = _size, \ -} +#define __BIN_ATTR_RO(_name, _size) \ + __BIN_ATTR(_name, 0444, _name##_read, NULL, _size) -#define __BIN_ATTR_WO(_name, _size) { \ - .attr = { .name = __stringify(_name), .mode = 0200 }, \ - .write = _name##_write, \ - .size = _size, \ -} +#define __BIN_ATTR_WO(_name, _size) \ + __BIN_ATTR(_name, 0200, NULL, _name##_write, _size) #define __BIN_ATTR_RW(_name, _size) \ __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) @@ -356,11 +382,8 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_WO(_name, _size) struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size) -#define __BIN_ATTR_ADMIN_RO(_name, _size) { \ - .attr = { .name = __stringify(_name), .mode = 0400 }, \ - .read = _name##_read, \ - .size = _size, \ -} +#define __BIN_ATTR_ADMIN_RO(_name, _size) \ + __BIN_ATTR(_name, 0400, _name##_read, NULL, _size) #define __BIN_ATTR_ADMIN_RW(_name, _size) \ __BIN_ATTR(_name, 0600, _name##_read, _name##_write, _size) @@ -371,10 +394,8 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RO(_name, _size) #define BIN_ATTR_ADMIN_RW(_name, _size) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_ADMIN_RW(_name, _size) -#define __BIN_ATTR_SIMPLE_RO(_name, _mode) { \ - .attr = { .name = __stringify(_name), .mode = _mode }, \ - .read = sysfs_bin_attr_simple_read, \ -} +#define __BIN_ATTR_SIMPLE_RO(_name, _mode) \ + __BIN_ATTR(_name, _mode, sysfs_bin_attr_simple_read, NULL, 0) #define BIN_ATTR_SIMPLE_RO(_name) \ struct bin_attribute bin_attr_##_name = __BIN_ATTR_SIMPLE_RO(_name, 0444) diff --git a/include/linux/tcp.h b/include/linux/tcp.h index 6a5e08b937b3..f88daaa76d83 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -200,7 +200,6 @@ struct tcp_sock { /* TX read-mostly hotpath cache lines */ __cacheline_group_begin(tcp_sock_read_tx); - /* timestamp of last sent data packet (for restart window) */ u32 max_window; /* Maximal window ever seen from peer */ u32 rcv_ssthresh; /* Current window clamp */ u32 reordering; /* Packet reordering metric. 
*/ @@ -263,7 +262,7 @@ struct tcp_sock { u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */ u32 write_seq; /* Tail(+1) of data held in tcp send buffer */ u32 pushed_seq; /* Last pushed seq, required to talk to windows */ - u32 lsndtime; + u32 lsndtime; /* timestamp of last sent data packet (for restart window) */ u32 mdev_us; /* medium deviation */ u32 rtt_seq; /* sequence number to update rttvar */ u64 tcp_wstamp_ns; /* departure time for next sent data packet */ diff --git a/include/linux/text-patching.h b/include/linux/text-patching.h new file mode 100644 index 000000000000..ad5877ab0855 --- /dev/null +++ b/include/linux/text-patching.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TEXT_PATCHING_H +#define _LINUX_TEXT_PATCHING_H + +#include <asm/text-patching.h> + +#ifndef text_poke_copy +static inline void *text_poke_copy(void *dst, const void *src, size_t len) +{ + return memcpy(dst, src, len); +} +#define text_poke_copy text_poke_copy +#endif + +#endif /* _LINUX_TEXT_PATCHING_H */ diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 25ea8fe2313e..754802478b96 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -56,6 +56,9 @@ enum thermal_notify_event { THERMAL_TZ_UNBIND_CDEV, /* Cooling dev is unbind from the thermal zone */ THERMAL_INSTANCE_WEIGHT_CHANGED, /* Thermal instance weight changed */ THERMAL_TZ_RESUME, /* Thermal zone is resuming after system sleep */ + THERMAL_TZ_ADD_THRESHOLD, /* Threshold added */ + THERMAL_TZ_DEL_THRESHOLD, /* Threshold deleted */ + THERMAL_TZ_FLUSH_THRESHOLDS, /* All thresholds deleted */ }; /** @@ -137,6 +140,9 @@ struct thermal_cooling_device { #endif }; +DEFINE_GUARD(cooling_dev, struct thermal_cooling_device *, mutex_lock(&_T->lock), + mutex_unlock(&_T->lock)) + /* Structure to define Thermal Zone parameters */ struct thermal_zone_params { const char *governor_name; diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 9ea0b28068f4..cf2446c9c30d 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -59,6 +59,14 @@ enum syscall_work_bit { #include <asm/thread_info.h> +#ifndef TIF_NEED_RESCHED_LAZY +#ifdef CONFIG_ARCH_HAS_PREEMPT_LAZY +#error Inconsistent PREEMPT_LAZY +#endif +#define TIF_NEED_RESCHED_LAZY TIF_NEED_RESCHED +#define _TIF_NEED_RESCHED_LAZY _TIF_NEED_RESCHED +#endif + #ifdef __KERNEL__ #ifndef arch_set_restart_data @@ -179,22 +187,27 @@ static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti #ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H -static __always_inline bool tif_need_resched(void) +static __always_inline bool tif_test_bit(int bit) { - return arch_test_bit(TIF_NEED_RESCHED, + return arch_test_bit(bit, (unsigned long *)(¤t_thread_info()->flags)); } #else -static __always_inline bool tif_need_resched(void) +static __always_inline bool tif_test_bit(int bit) { - return test_bit(TIF_NEED_RESCHED, + return test_bit(bit, (unsigned long *)(¤t_thread_info()->flags)); } #endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */ +static __always_inline bool tif_need_resched(void) +{ + return tif_test_bit(TIF_NEED_RESCHED); +} + #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES static inline int arch_within_stack_frames(const void * const stack, const void * const stackend, diff --git a/include/linux/tick.h b/include/linux/tick.h index 99c9c5a7252a..b8ddc8e631a3 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -20,12 +20,10 @@ extern void __init tick_init(void); extern void 
tick_suspend_local(void); /* Should be core only, but XEN resume magic and ARM BL switcher require it */ extern void tick_resume_local(void); -extern void tick_cleanup_dead_cpu(int cpu); #else /* CONFIG_GENERIC_CLOCKEVENTS */ static inline void tick_init(void) { } static inline void tick_suspend_local(void) { } static inline void tick_resume_local(void) { } -static inline void tick_cleanup_dead_cpu(int cpu) { } #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ #if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_HOTPLUG_CPU) diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 902c20ef495a..e39d4d563b19 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -26,7 +26,7 @@ * occupies a single 64byte cache line. * * The struct is separate from struct timekeeper as it is also used - * for a fast NMI safe accessors. + * for the fast NMI safe accessors. * * @base_real is for the fast NMI safe accessor to allow reading clock * realtime from any context. @@ -44,33 +44,38 @@ struct tk_read_base { /** * struct timekeeper - Structure holding internal timekeeping values. - * @tkr_mono: The readout base structure for CLOCK_MONOTONIC - * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW - * @xtime_sec: Current CLOCK_REALTIME time in seconds - * @ktime_sec: Current CLOCK_MONOTONIC time in seconds - * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset - * @offs_real: Offset clock monotonic -> clock realtime - * @offs_boot: Offset clock monotonic -> clock boottime - * @offs_tai: Offset clock monotonic -> clock tai - * @tai_offset: The current UTC to TAI offset in seconds - * @clock_was_set_seq: The sequence number of clock was set events - * @cs_was_changed_seq: The sequence number of clocksource change events - * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second - * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds - * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset - * @cycle_interval: Number of clock cycles in one NTP interval - * @xtime_interval: Number of clock shifted nano seconds in one NTP - * interval. - * @xtime_remainder: Shifted nano seconds left over when rounding - * @cycle_interval - * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. - * @ntp_error: Difference between accumulated time and NTP time in ntp - * shifted nano seconds. - * @ntp_error_shift: Shift conversion between clock shifted nano seconds and - * ntp shifted nano seconds. 
- * @last_warning: Warning ratelimiter (DEBUG_TIMEKEEPING) - * @underflow_seen: Underflow warning flag (DEBUG_TIMEKEEPING) - * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING) + * @tkr_mono: The readout base structure for CLOCK_MONOTONIC + * @xtime_sec: Current CLOCK_REALTIME time in seconds + * @ktime_sec: Current CLOCK_MONOTONIC time in seconds + * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset + * @offs_real: Offset clock monotonic -> clock realtime + * @offs_boot: Offset clock monotonic -> clock boottime + * @offs_tai: Offset clock monotonic -> clock tai + * @tai_offset: The current UTC to TAI offset in seconds + * @tkr_raw: The readout base structure for CLOCK_MONOTONIC_RAW + * @raw_sec: CLOCK_MONOTONIC_RAW time in seconds + * @clock_was_set_seq: The sequence number of clock was set events + * @cs_was_changed_seq: The sequence number of clocksource change events + * @monotonic_to_boot: CLOCK_MONOTONIC to CLOCK_BOOTTIME offset + * @cycle_interval: Number of clock cycles in one NTP interval + * @xtime_interval: Number of clock shifted nano seconds in one NTP + * interval. + * @xtime_remainder: Shifted nano seconds left over when rounding + * @cycle_interval + * @raw_interval: Shifted raw nano seconds accumulated per NTP interval. + * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second + * @ntp_tick: The ntp_tick_length() value currently being + * used. This cached copy ensures we consistently + * apply the tick length for an entire tick, as + * ntp_tick_length may change mid-tick, and we don't + * want to apply that new value to the tick in + * progress. + * @ntp_error: Difference between accumulated time and NTP time in ntp + * shifted nano seconds. + * @ntp_error_shift: Shift conversion between clock shifted nano seconds and + * ntp shifted nano seconds. + * @ntp_err_mult: Multiplication factor for scaled math conversion + * @skip_second_overflow: Flag used to avoid updating NTP twice with same second * * Note: For timespec(64) based interfaces wall_to_monotonic is what * we need to add to xtime (or xtime corrected for sub jiffy times) @@ -88,10 +93,28 @@ struct tk_read_base { * * @monotonic_to_boottime is a timespec64 representation of @offs_boot to * accelerate the VDSO update for CLOCK_BOOTTIME. + * + * The cacheline ordering of the structure is optimized for in kernel usage of + * the ktime_get() and ktime_get_ts64() family of time accessors. Struct + * timekeeper is prepended in the core timekeeping code with a sequence count, + * which results in the following cacheline layout: + * + * 0: seqcount, tkr_mono + * 1: xtime_sec ... tai_offset + * 2: tkr_raw, raw_sec + * 3,4: Internal variables + * + * Cacheline 0,1 contain the data which is used for accessing + * CLOCK_MONOTONIC/REALTIME/BOOTTIME/TAI, while cacheline 2 contains the + * data for accessing CLOCK_MONOTONIC_RAW. Cacheline 3,4 are internal + * variables which are only accessed during timekeeper updates once per + * tick. 
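 *
 * (Editor's sketch, not part of the patch: a hot-path reader such as
 * ktime_get() stays on cacheline 0, roughly
 *
 *	do {
 *		seq = read_seqcount_begin(&tk_core.seq);
 *		base = tk->tkr_mono.base;
 *		nsecs = timekeeping_get_ns(&tk->tkr_mono);
 *	} while (read_seqcount_retry(&tk_core.seq, seq));
 *
 * where tk_core is the timekeeper core instance internal to
 * kernel/time/timekeeping.c.)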
*/ struct timekeeper { + /* Cacheline 0 (together with prepended seqcount of timekeeper core): */ struct tk_read_base tkr_mono; - struct tk_read_base tkr_raw; + + /* Cacheline 1: */ u64 xtime_sec; unsigned long ktime_sec; struct timespec64 wall_to_monotonic; @@ -99,43 +122,28 @@ struct timekeeper { ktime_t offs_boot; ktime_t offs_tai; s32 tai_offset; + + /* Cacheline 2: */ + struct tk_read_base tkr_raw; + u64 raw_sec; + + /* Cacheline 3 and 4 (timekeeping internal variables): */ unsigned int clock_was_set_seq; u8 cs_was_changed_seq; - ktime_t next_leap_ktime; - u64 raw_sec; + struct timespec64 monotonic_to_boot; - /* The following members are for timekeeping internal use */ u64 cycle_interval; u64 xtime_interval; s64 xtime_remainder; u64 raw_interval; - /* The ntp_tick_length() value currently being used. - * This cached copy ensures we consistently apply the tick - * length for an entire tick, as ntp_tick_length may change - * mid-tick, and we don't want to apply that new value to - * the tick in progress. - */ + + ktime_t next_leap_ktime; u64 ntp_tick; - /* Difference between accumulated time and NTP time in ntp - * shifted nano seconds. */ s64 ntp_error; u32 ntp_error_shift; u32 ntp_err_mult; - /* Flag used to avoid updating NTP twice with same second */ u32 skip_second_overflow; -#ifdef CONFIG_DEBUG_TIMEKEEPING - long last_warning; - /* - * These simple flag variables are managed - * without locks, which is racy, but they are - * ok since we don't really care about being - * super precise about how many events were - * seen, just that a problem was observed. - */ - int underflow_seen; - int overflow_seen; -#endif }; #ifdef CONFIG_GENERIC_TIME_VSYSCALL diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h index fc12a9ba2c88..0e035f675efe 100644 --- a/include/linux/timekeeping.h +++ b/include/linux/timekeeping.h @@ -45,6 +45,11 @@ extern void ktime_get_real_ts64(struct timespec64 *tv); extern void ktime_get_coarse_ts64(struct timespec64 *ts); extern void ktime_get_coarse_real_ts64(struct timespec64 *ts); +/* Multigrain timestamp interfaces */ +extern void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts); +extern void ktime_get_real_ts64_mg(struct timespec64 *ts); +extern unsigned long timekeeping_get_mg_floor_swaps(void); + void getboottime64(struct timespec64 *ts); /* @@ -275,6 +280,7 @@ struct ktime_timestamps { * counter value * @cycles: Clocksource counter value to produce the system times * @real: Realtime system time + * @boot: Boot time * @raw: Monotonic raw system time * @cs_id: Clocksource ID * @clock_was_set_seq: The sequence number of clock-was-set events @@ -283,6 +289,7 @@ struct ktime_timestamps { struct system_time_snapshot { u64 cycles; ktime_t real; + ktime_t boot; ktime_t raw; enum clocksource_ids cs_id; unsigned int clock_was_set_seq; diff --git a/include/linux/timex.h b/include/linux/timex.h index 3871b06bd302..4ee32eff3f22 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h @@ -139,14 +139,6 @@ unsigned long random_get_entropy_fallback(void); #define MAXSEC 2048 /* max interval between updates (s) */ #define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */ -/* - * kernel variables - * Note: maximum error = NTP sync distance = dispersion + delay / 2; - * estimated error = NTP dispersion.
- */ -extern unsigned long tick_usec; /* USER_HZ period (usec) */ -extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ - /* Required to safely shift negative values */ #define shift_right(x, s) ({ \ __typeof__(x) __x = (x); \ diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 7d68a5cc5881..891368e82558 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h @@ -157,7 +157,7 @@ struct tcg_algorithm_info { * Return: size of the event on success, 0 on failure */ -static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, +static __always_inline u32 __calc_tpm2_event_size(struct tcg_pcr_event2_head *event, struct tcg_pcr_event *event_header, bool do_mapping) { diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index 42bedcddd511..2a5df5b62cfc 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -184,7 +184,7 @@ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status); enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, - TRACE_FLAG_IRQS_NOSUPPORT = 0x02, + TRACE_FLAG_NEED_RESCHED_LAZY = 0x02, TRACE_FLAG_NEED_RESCHED = 0x04, TRACE_FLAG_HARDIRQ = 0x08, TRACE_FLAG_SOFTIRQ = 0x10, @@ -193,7 +193,6 @@ enum trace_flag_type { TRACE_FLAG_BH_OFF = 0x80, }; -#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags) { unsigned int irq_status = irqs_disabled_flags(irqflags) ? @@ -207,17 +206,6 @@ static inline unsigned int tracing_gen_ctx(void) local_save_flags(irqflags); return tracing_gen_ctx_flags(irqflags); } -#else - -static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags) -{ - return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT); -} -static inline unsigned int tracing_gen_ctx(void) -{ - return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT); -} -#endif static inline unsigned int tracing_gen_ctx_dec(void) { @@ -326,7 +314,6 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer, void trace_event_buffer_commit(struct trace_event_buffer *fbuffer); enum { - TRACE_EVENT_FL_FILTERED_BIT, TRACE_EVENT_FL_CAP_ANY_BIT, TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, @@ -341,7 +328,6 @@ enum { /* * Event flags: - * FILTERED - The event has a filter attached * CAP_ANY - Any user can enable for perf * NO_SET_FILTER - Set when filter has error and is to be ignored * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file @@ -356,7 +342,6 @@ enum { * to a tracepoint yet, then it is cleared when it is. */ enum { - TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), @@ -381,7 +366,6 @@ struct trace_event_call { }; struct trace_event event; char *print_fmt; - struct event_filter *filter; /* * Static events can disappear with modules, * where as dynamic ones need their own ref count. diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 4dc4955f0fbf..aebf0571c736 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h @@ -29,16 +29,22 @@ struct tracepoint_func { int prio; }; +struct tracepoint_ext { + int (*regfunc)(void); + void (*unregfunc)(void); + /* Flags. 
*/ + unsigned int faultable:1; +}; + struct tracepoint { const char *name; /* Tracepoint name */ - struct static_key key; + struct static_key_false key; struct static_call_key *static_call_key; void *static_call_tramp; void *iterator; void *probestub; - int (*regfunc)(void); - void (*unregfunc)(void); struct tracepoint_func __rcu *funcs; + struct tracepoint_ext *ext; }; #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS @@ -83,7 +89,7 @@ struct bpf_raw_event_map { #ifdef CONFIG_TRACEPOINTS # define tracepoint_enabled(tp) \ - static_key_false(&(__tracepoint_##tp).key) + static_branch_unlikely(&(__tracepoint_##tp).key) #else # define tracepoint_enabled(tracepoint) false #endif diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 93a9f3070b48..76d9055b2cff 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -17,6 +17,7 @@ #include <linux/errno.h> #include <linux/types.h> #include <linux/rcupdate.h> +#include <linux/rcupdate_trace.h> #include <linux/tracepoint-defs.h> #include <linux/static_call.h> @@ -32,8 +33,6 @@ struct trace_eval_map { #define TRACEPOINT_DEFAULT_PRIO 10 -extern struct srcu_struct tracepoint_srcu; - extern int tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data); extern int @@ -105,16 +104,30 @@ void for_each_tracepoint_in_module(struct module *mod, * tracepoint_synchronize_unregister must be called between the last tracepoint * probe unregistration and the end of module exit to make sure there is no * caller executing a probe when it is freed. + * + * An alternative is to use the following for batch reclaim associated + * with a given tracepoint: + * + * - tracepoint_is_faultable() == false: call_rcu() + * - tracepoint_is_faultable() == true: call_rcu_tasks_trace() */ #ifdef CONFIG_TRACEPOINTS static inline void tracepoint_synchronize_unregister(void) { - synchronize_srcu(&tracepoint_srcu); + synchronize_rcu_tasks_trace(); synchronize_rcu(); } +static inline bool tracepoint_is_faultable(struct tracepoint *tp) +{ + return tp->ext && tp->ext->faultable; +} #else static inline void tracepoint_synchronize_unregister(void) { } +static inline bool tracepoint_is_faultable(struct tracepoint *tp) +{ + return false; +} #endif #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS @@ -197,65 +210,16 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #endif /* CONFIG_HAVE_STATIC_CALL */ /* - * ARCH_WANTS_NO_INSTR archs are expected to have sanitized entry and idle - * code that disallow any/all tracing/instrumentation when RCU isn't watching. - */ -#ifdef CONFIG_ARCH_WANTS_NO_INSTR -#define RCUIDLE_COND(rcuidle) (rcuidle) -#else -/* srcu can't be used from NMI */ -#define RCUIDLE_COND(rcuidle) (rcuidle && in_nmi()) -#endif - -/* - * it_func[0] is never NULL because there is at least one element in the array - * when the array itself is non NULL. + * Declare an exported function that Rust code can call to trigger this + * tracepoint. This function does not include the static branch; that is done + * in Rust to avoid a function call when the tracepoint is disabled. */ -#define __DO_TRACE(name, args, cond, rcuidle) \ - do { \ - int __maybe_unused __idx = 0; \ - \ - if (!(cond)) \ - return; \ - \ - if (WARN_ONCE(RCUIDLE_COND(rcuidle), \ - "Bad RCU usage for tracepoint")) \ - return; \ - \ - /* keep srcu and sched-rcu usage consistent */ \ - preempt_disable_notrace(); \ - \ - /* \ - * For rcuidle callers, use srcu since sched-rcu \ - * doesn't work from the idle path. 
\ - */ \ - if (rcuidle) { \ - __idx = srcu_read_lock_notrace(&tracepoint_srcu);\ - ct_irq_enter_irqson(); \ - } \ - \ - __DO_TRACE_CALL(name, TP_ARGS(args)); \ - \ - if (rcuidle) { \ - ct_irq_exit_irqson(); \ - srcu_read_unlock_notrace(&tracepoint_srcu, __idx);\ - } \ - \ - preempt_enable_notrace(); \ - } while (0) - -#ifndef MODULE -#define __DECLARE_TRACE_RCU(name, proto, args, cond) \ - static inline void trace_##name##_rcuidle(proto) \ +#define DEFINE_RUST_DO_TRACE(name, proto, args) +#define __DEFINE_RUST_DO_TRACE(name, proto, args) \ + notrace void rust_do_trace_##name(proto) \ { \ - if (static_key_false(&__tracepoint_##name.key)) \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 1); \ + __rust_do_trace_##name(args); \ } -#else -#define __DECLARE_TRACE_RCU(name, proto, args, cond) -#endif /* * Make sure the alignment of the structure in the __tracepoints section will @@ -268,23 +232,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * site if it is not watching, as it will need to be active when the * tracepoint is enabled. */ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ +#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto) \ extern int __traceiter_##name(data_proto); \ DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \ extern struct tracepoint __tracepoint_##name; \ - static inline void trace_##name(proto) \ - { \ - if (static_key_false(&__tracepoint_##name.key)) \ - __DO_TRACE(name, \ - TP_ARGS(args), \ - TP_CONDITION(cond), 0); \ - if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ - WARN_ONCE(!rcu_is_watching(), \ - "RCU not watching for tracepoint"); \ - } \ - } \ - __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ - PARAMS(cond)) \ + extern void rust_do_trace_##name(proto); \ static inline int \ register_trace_##name(void (*probe)(data_proto), void *data) \ { \ @@ -311,15 +263,61 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) static inline bool \ trace_##name##_enabled(void) \ { \ - return static_key_false(&__tracepoint_##name.key); \ + return static_branch_unlikely(&__tracepoint_##name.key);\ + } + +#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ + static inline void __rust_do_trace_##name(proto) \ + { \ + if (cond) { \ + guard(preempt_notrace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + } \ + static inline void trace_##name(proto) \ + { \ + if (static_branch_unlikely(&__tracepoint_##name.key)) { \ + if (cond) { \ + guard(preempt_notrace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + } \ + if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ + WARN_ONCE(!rcu_is_watching(), \ + "RCU not watching for tracepoint"); \ + } \ + } + +#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) \ + static inline void __rust_do_trace_##name(proto) \ + { \ + guard(rcu_tasks_trace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + static inline void trace_##name(proto) \ + { \ + might_fault(); \ + if (static_branch_unlikely(&__tracepoint_##name.key)) { \ + guard(rcu_tasks_trace)(); \ + __DO_TRACE_CALL(name, TP_ARGS(args)); \ + } \ + if (IS_ENABLED(CONFIG_LOCKDEP)) { \ + WARN_ONCE(!rcu_is_watching(), \ + "RCU not watching for tracepoint"); \ + } \ } /* * We have no guarantee that gcc and the linker won't up-align the tracepoint * structures, so we create an array of pointers that 
will be used for iteration * on the tracepoints. + * + * it_func[0] is never NULL because there is at least one element in the array + * when the array itself is non NULL. */ -#define DEFINE_TRACE_FN(_name, _reg, _unreg, proto, args) \ +#define __DEFINE_TRACE_EXT(_name, _ext, proto, args) \ static const char __tpstrtab_##_name[] \ __section("__tracepoints_strings") = #_name; \ extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \ @@ -328,14 +326,14 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) struct tracepoint __tracepoint_##_name __used \ __section("__tracepoints") = { \ .name = __tpstrtab_##_name, \ - .key = STATIC_KEY_INIT_FALSE, \ + .key = STATIC_KEY_FALSE_INIT, \ .static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \ .static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \ .iterator = &__traceiter_##_name, \ .probestub = &__probestub_##_name, \ - .regfunc = _reg, \ - .unregfunc = _unreg, \ - .funcs = NULL }; \ + .funcs = NULL, \ + .ext = _ext, \ + }; \ __TRACEPOINT_ENTRY(_name); \ int __traceiter_##_name(void *__data, proto) \ { \ @@ -356,10 +354,27 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) void __probestub_##_name(void *__data, proto) \ { \ } \ - DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); + DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name); \ + DEFINE_RUST_DO_TRACE(_name, TP_PROTO(proto), TP_ARGS(args)) -#define DEFINE_TRACE(name, proto, args) \ - DEFINE_TRACE_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args)); +#define DEFINE_TRACE_FN(_name, _reg, _unreg, _proto, _args) \ + static struct tracepoint_ext __tracepoint_ext_##_name = { \ + .regfunc = _reg, \ + .unregfunc = _unreg, \ + .faultable = false, \ + }; \ + __DEFINE_TRACE_EXT(_name, &__tracepoint_ext_##_name, PARAMS(_proto), PARAMS(_args)); + +#define DEFINE_TRACE_SYSCALL(_name, _reg, _unreg, _proto, _args) \ + static struct tracepoint_ext __tracepoint_ext_##_name = { \ + .regfunc = _reg, \ + .unregfunc = _unreg, \ + .faultable = true, \ + }; \ + __DEFINE_TRACE_EXT(_name, &__tracepoint_ext_##_name, PARAMS(_proto), PARAMS(_args)); + +#define DEFINE_TRACE(_name, _proto, _args) \ + __DEFINE_TRACE_EXT(_name, NULL, PARAMS(_proto), PARAMS(_args)); #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) \ EXPORT_SYMBOL_GPL(__tracepoint_##name); \ @@ -372,11 +387,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) #else /* !TRACEPOINTS_ENABLED */ -#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ +#define __DECLARE_TRACE_COMMON(name, proto, args, data_proto) \ static inline void trace_##name(proto) \ { } \ - static inline void trace_##name##_rcuidle(proto) \ - { } \ static inline int \ register_trace_##name(void (*probe)(data_proto), \ void *data) \ @@ -398,7 +411,14 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) return false; \ } +#define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) + +#define __DECLARE_TRACE_SYSCALL(name, proto, args, data_proto) \ + __DECLARE_TRACE_COMMON(name, PARAMS(proto), PARAMS(args), PARAMS(data_proto)) + #define DEFINE_TRACE_FN(name, reg, unreg, proto, args) +#define DEFINE_TRACE_SYSCALL(name, reg, unreg, proto, args) #define DEFINE_TRACE(name, proto, args) #define EXPORT_TRACEPOINT_SYMBOL_GPL(name) #define EXPORT_TRACEPOINT_SYMBOL(name) @@ -459,6 +479,10 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) 
cpu_online(raw_smp_processor_id()) && (PARAMS(cond)), \ PARAMS(void *__data, proto)) +#define DECLARE_TRACE_SYSCALL(name, proto, args) \ + __DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args), \ + PARAMS(void *__data, proto)) + #define TRACE_EVENT_FLAGS(event, flag) #define TRACE_EVENT_PERF_PERM(event, expr...) @@ -596,6 +620,9 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) struct, assign, print) \ DECLARE_TRACE_CONDITION(name, PARAMS(proto), \ PARAMS(args), PARAMS(cond)) +#define TRACE_EVENT_SYSCALL(name, proto, args, struct, assign, \ + print, reg, unreg) \ + DECLARE_TRACE_SYSCALL(name, PARAMS(proto), PARAMS(args)) #define TRACE_EVENT_FLAGS(event, flag) diff --git a/include/linux/types.h b/include/linux/types.h index 2bc8766ba20c..2d7b9ae8714c 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -115,8 +115,9 @@ typedef u64 u_int64_t; typedef s64 int64_t; #endif -/* this is a special 64bit data type that is 8-byte aligned */ +/* These are the special 64-bit data types that are 8-byte aligned */ #define aligned_u64 __aligned_u64 +#define aligned_s64 __aligned_s64 #define aligned_be64 __aligned_be64 #define aligned_le64 __aligned_le64 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 43844510d5d0..e9c702c1908d 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -403,6 +403,103 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, return 0; } +/** + * copy_struct_to_user: copy a struct to userspace + * @dst: Destination address, in userspace. This buffer must be @usize + * bytes long. + * @usize: (Alleged) size of @dst struct. + * @src: Source address, in kernel space. + * @ksize: Size of @src struct. + * @ignored_trailing: Set to %true if there was a non-zero byte in @src that + * userspace cannot see because they are using a smaller struct. + * + * Copies a struct from kernel space to userspace, in a way that guarantees + * backwards-compatibility for struct syscall arguments (as long as future + * struct extensions are made such that all new fields are *appended* to the + * old struct, and zeroed-out new fields have the same meaning as the old + * struct). + * + * Some syscalls may wish to make sure that userspace knows about everything in + * the struct, and if there is a non-zero value that userspace doesn't know + * about, they want to return an error (such as -EMSGSIZE) or have some other + * fallback (such as adding a "you're missing some information" flag). If + * @ignored_trailing is non-%NULL, it will be set to %true if there was a + * non-zero byte that could not be copied to userspace (i.e. was past @usize). + * + * While unconditionally returning an error in this case is the simplest + * solution, for maximum backward compatibility you should try to only return + * -EMSGSIZE if the user explicitly requested the data that couldn't be copied. + * Note that structure sizes can change due to header changes and simple + * recompilations without code changes(!), so if you care about + * @ignored_trailing you probably want to make sure that any new field data is + * associated with a flag. Otherwise you might assume that a program knows + * about data it does not. + * + * @ksize is just sizeof(*src), and @usize should've been passed by userspace.
+ * The recommended usage is something like the following: + * + * SYSCALL_DEFINE2(foobar, struct foo __user *, uarg, size_t, usize) + * { + * int err; + * bool ignored_trailing; + * struct foo karg = {}; + * + * if (usize > PAGE_SIZE) + * return -E2BIG; + * if (usize < FOO_SIZE_VER0) + * return -EINVAL; + * + * // ... modify karg somehow ... + * + * err = copy_struct_to_user(uarg, usize, &karg, sizeof(karg), + * &ignored_trailing); + * if (err) + * return err; + * if (ignored_trailing) + * return -EMSGSIZE; + * + * // ... + * } + * + * There are three cases to consider: + * * If @usize == @ksize, then it's copied verbatim. + * * If @usize < @ksize, then the kernel is trying to pass userspace a newer + * struct than it supports. Thus we only copy the interoperable portions + * (@usize) and ignore the rest (but @ignored_trailing is set to %true if + * any of the trailing (@ksize - @usize) bytes are non-zero). + * * If @usize > @ksize, then the kernel is trying to pass userspace an older + * struct than userspace supports. In order to make sure the + * unknown-to-the-kernel fields don't contain garbage values, we zero the + * trailing (@usize - @ksize) bytes. + * + * Returns (in all cases, some data may have been copied): + * * -EFAULT: access to userspace failed. + */ +static __always_inline __must_check int +copy_struct_to_user(void __user *dst, size_t usize, const void *src, + size_t ksize, bool *ignored_trailing) +{ + size_t size = min(ksize, usize); + size_t rest = max(ksize, usize) - size; + + /* Double check if ksize is larger than a known object size. */ + if (WARN_ON_ONCE(ksize > __builtin_object_size(src, 1))) + return -E2BIG; + + /* Deal with trailing bytes. */ + if (usize > ksize) { + if (clear_user(dst + size, rest)) + return -EFAULT; + } + if (ignored_trailing) + *ignored_trailing = usize < ksize && + memchr_inv(src + size, 0, rest) != NULL; + /* Copy the interoperable parts of the struct. */ + if (copy_to_user(dst, src, size)) + return -EFAULT; + return 0; +} + bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size); long copy_from_kernel_nofault(void *dst, const void *src, size_t size); diff --git a/include/linux/udp.h b/include/linux/udp.h index 3eb3f2b9a2a0..0807e21cfec9 100644 --- a/include/linux/udp.h +++ b/include/linux/udp.h @@ -56,6 +56,12 @@ struct udp_sock { int pending; /* Any pending frames ? */ __u8 encap_type; /* Is this an Encapsulation socket? */ +#if !IS_ENABLED(CONFIG_BASE_SMALL) + /* For UDP 4-tuple hash */ + __u16 udp_lrpa_hash; + struct hlist_nulls_node udp_lrpa_node; +#endif + /* * Following member retains the information to create a UDP header * when the socket is uncorked.
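The three usize/ksize cases above can be made concrete with a short worked example. The following sketch is illustrative only and not part of the patch; struct foo_v1, struct foo_v2 and foo_fill() are invented names, assuming a struct that grew by one trailing field:

struct foo_v1 { __u64 a; };			/* what old userspace knows:  8 bytes */
struct foo_v2 { __u64 a; __u64 b; };		/* what the kernel knows now: 16 bytes */

static int foo_fill(struct foo_v2 __user *uarg, size_t usize)
{
	struct foo_v2 karg = { .a = 1, .b = 2 };
	bool ignored;
	int err;

	/*
	 * usize == 16: copied verbatim, ignored == false.
	 * usize ==  8: only 'a' is copied; ignored == true because 'b' != 0.
	 * usize == 24: all 16 bytes are copied, the trailing 8 user bytes are zeroed.
	 */
	err = copy_struct_to_user(uarg, usize, &karg, sizeof(karg), &ignored);
	if (err)
		return err;
	return ignored ? -EMSGSIZE : 0;
}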
@@ -206,6 +212,11 @@ static inline void udp_allow_gso(struct sock *sk) #define udp_portaddr_for_each_entry_rcu(__sk, list) \ hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node) +#if !IS_ENABLED(CONFIG_BASE_SMALL) +#define udp_lrpa_for_each_entry_rcu(__up, node, list) \ + hlist_nulls_for_each_entry_rcu(__up, node, list, udp_lrpa_node) +#endif + #define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE) #endif /* _LINUX_UDP_H */ diff --git a/include/linux/unicode.h b/include/linux/unicode.h index 4d39e6e11a95..5e6b212a2aed 100644 --- a/include/linux/unicode.h +++ b/include/linux/unicode.h @@ -16,6 +16,8 @@ struct utf8data_table; ((unsigned int)(MIN) << UNICODE_MIN_SHIFT) | \ ((unsigned int)(REV))) +#define UTF8_LATEST UNICODE_AGE(12, 1, 0) + static inline u8 unicode_major(unsigned int age) { return (age >> UNICODE_MAJ_SHIFT) & 0xff; @@ -76,4 +78,6 @@ int utf8_casefold_hash(const struct unicode_map *um, const void *salt, struct unicode_map *utf8_load(unsigned int version); void utf8_unload(struct unicode_map *um); +int utf8_parse_version(char *version); + #endif /* _LINUX_UNICODE_H */ diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 2b294bf1881f..e0a4c2082245 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -15,6 +15,7 @@ #include <linux/rbtree.h> #include <linux/types.h> #include <linux/wait.h> +#include <linux/timer.h> struct uprobe; struct vm_area_struct; @@ -23,8 +24,17 @@ struct inode; struct notifier_block; struct page; +/* + * Allowed return values from uprobe consumer's handler callback + * with the following meaning: + * + * UPROBE_HANDLER_REMOVE + * - Remove the uprobe breakpoint from current->mm. + * UPROBE_HANDLER_IGNORE + * - Ignore ret_handler callback for this consumer. + */ #define UPROBE_HANDLER_REMOVE 1 -#define UPROBE_HANDLER_MASK 1 +#define UPROBE_HANDLER_IGNORE 2 #define MAX_URETPROBE_DEPTH 64 @@ -37,13 +47,15 @@ struct uprobe_consumer { * for the current process. If filter() is omitted or returns true, * UPROBE_HANDLER_REMOVE is effectively ignored. */ - int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); + int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs, __u64 *data); int (*ret_handler)(struct uprobe_consumer *self, unsigned long func, - struct pt_regs *regs); + struct pt_regs *regs, __u64 *data); bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm); struct list_head cons_node; + + __u64 id; /* set when uprobe_consumer is registered */ }; #ifdef CONFIG_UPROBES @@ -56,12 +68,62 @@ enum uprobe_task_state { UTASK_SSTEP_TRAPPED, }; +/* The state of hybrid-lifetime uprobe inside struct return_instance */ +enum hprobe_state { + HPROBE_LEASED, /* uretprobes_srcu-protected uprobe */ + HPROBE_STABLE, /* refcounted uprobe */ + HPROBE_GONE, /* NULL uprobe, SRCU expired, refcount failed */ + HPROBE_CONSUMED, /* uprobe "consumed" by uretprobe handler */ +}; + +/* + * Hybrid lifetime uprobe. Represents a uprobe instance that could be either + * SRCU protected (with SRCU protection eventually potentially timing out), + * refcounted using uprobe->ref, or there could be no valid uprobe (NULL). + * + * hprobe's internal state is set up such that the background timer thread can + * atomically "downgrade" a temporarily RCU-protected uprobe into a refcounted + * one (or no uprobe, if refcounting failed). + * + * The *stable* pointer always points to the uprobe (or could be NULL if there + * was no valid underlying uprobe to begin with).
+ * + * The *leased* pointer is the key to achieving race-free atomic lifetime state + * transition and can have three possible states: + * - either the same non-NULL value as *stable*, in which case uprobe is + * SRCU-protected; + * - NULL, in which case uprobe (if there is any) is refcounted; + * - special __UPROBE_DEAD value, which represents an uprobe that was SRCU + * protected initially, but SRCU period timed out and we attempted to + * convert it to refcounted, but refcount_inc_not_zero() failed, because + * uprobe effectively went away (the last consumer unsubscribed). In this + * case it's important to know that the *stable* pointer (which still has a + * non-NULL uprobe pointer) shouldn't be used, because the lifetime of the + * underlying uprobe is not guaranteed anymore. __UPROBE_DEAD is just an + * internal marker and is handled transparently by the hprobe_fetch() helper. + * + * When the uprobe is SRCU-protected, we also record the srcu_idx value + * necessary for SRCU unlocking. + * + * See hprobe_expire() and hprobe_fetch() for the details of race-free uprobe + * state transitions. It all hinges on an atomic xchg() over the *leased* + * pointer; the *stable* pointer, once initially set, is not modified concurrently. + */ +struct hprobe { + enum hprobe_state state; + int srcu_idx; + struct uprobe *uprobe; +}; + /* * uprobe_task: Metadata of a task while it singlesteps. */ struct uprobe_task { enum uprobe_task_state state; + unsigned int depth; + struct return_instance *return_instances; + union { struct { struct arch_uprobe_task autask; @@ -75,23 +137,30 @@ struct uprobe_task { }; struct uprobe *active_uprobe; + struct timer_list ri_timer; unsigned long xol_vaddr; struct arch_uprobe *auprobe; +}; - struct return_instance *return_instances; - unsigned int depth; +struct return_consumer { + __u64 cookie; + __u64 id; }; struct return_instance { - struct uprobe *uprobe; + struct hprobe hprobe; unsigned long func; unsigned long stack; /* stack pointer */ unsigned long orig_ret_vaddr; /* original return address */ bool chained; /* true, if instance is nested */ + int consumers_cnt; struct return_instance *next; /* keep as stack */ -}; + struct rcu_head rcu; + + struct return_consumer consumers[] __counted_by(consumers_cnt); +} ____cacheline_aligned; enum rp_check { RP_CHECK_CALL, diff --git a/include/linux/usb.h b/include/linux/usb.h index 672d8fc2abdb..cfa8005e24f9 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1129,8 +1129,8 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size) /* ----------------------------------------------------------------------- */ /* Stuff for dynamic usb ids */ +extern struct mutex usb_dynids_lock; struct usb_dynids { - spinlock_t lock; struct list_head list; }; @@ -1243,7 +1243,7 @@ struct usb_driver { unsigned int disable_hub_initiated_lpm:1; unsigned int soft_unbind:1; }; -#define to_usb_driver(d) container_of(d, struct usb_driver, driver) +#define to_usb_driver(d) container_of_const(d, struct usb_driver, driver) /** * struct usb_device_driver - identifies USB device driver to usbcore @@ -1294,8 +1294,7 @@ struct usb_device_driver { unsigned int supports_autosuspend:1; unsigned int generic_subclass:1; }; -#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \ - driver) +#define to_usb_device_driver(d) container_of_const(d, struct usb_device_driver, driver) /** * struct usb_class_driver - identifies a USB driver that wants to use the USB major number diff --git a/include/linux/usb/chipidea.h
b/include/linux/usb/chipidea.h index 5a7f96684ea2..ebdfef124b2b 100644 --- a/include/linux/usb/chipidea.h +++ b/include/linux/usb/chipidea.h @@ -65,6 +65,7 @@ struct ci_hdrc_platform_data { #define CI_HDRC_PHY_VBUS_CONTROL BIT(16) #define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17) #define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18) +#define CI_HDRC_HAS_SHORT_PKT_LIMIT BIT(19) enum usb_dr_mode dr_mode; #define CI_HDRC_CONTROLLER_RESET_EVENT 0 #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1 diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h index 2827ce72e502..8539956bc2be 100644 --- a/include/linux/usb/storage.h +++ b/include/linux/usb/storage.h @@ -53,7 +53,7 @@ struct bulk_cb_wrap { __le32 Signature; /* contains 'USBC' */ __u32 Tag; /* unique per command id */ __le32 DataTransferLength; /* size of data */ - __u8 Flags; /* direction in bit 0 */ + __u8 Flags; /* direction in bit 7 */ __u8 Lun; /* LUN normally 0 */ __u8 Length; /* length of the CDB */ __u8 CDB[16]; /* max command */ diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 549275f8ac1b..d616b8807000 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -87,6 +87,17 @@ enum typec_orientation { TYPEC_ORIENTATION_REVERSE, }; +enum usb_mode { + USB_MODE_NONE, + USB_MODE_USB2, + USB_MODE_USB3, + USB_MODE_USB4 +}; + +#define USB_CAPABILITY_USB2 BIT(0) +#define USB_CAPABILITY_USB3 BIT(1) +#define USB_CAPABILITY_USB4 BIT(2) + /* * struct enter_usb_data - Enter_USB Message details * @eudo: Enter_USB Data Object @@ -209,6 +220,7 @@ struct typec_cable_desc { * @accessory: Audio, Debug or none. * @identity: Discover Identity command data * @pd_revision: USB Power Delivery Specification Revision if supported + * @usb_capability: Supported USB Modes * @attach: Notification about attached USB device * @deattach: Notification about removed USB device * @@ -226,6 +238,7 @@ struct typec_partner_desc { enum typec_accessory accessory; struct usb_pd_identity *identity; u16 pd_revision; /* 0300H = "3.0" */ + u8 usb_capability; void (*attach)(struct typec_partner *partner, struct device *dev); void (*deattach)(struct typec_partner *partner, struct device *dev); @@ -240,6 +253,8 @@ struct typec_partner_desc { * @port_type_set: Set port type * @pd_get: Get available USB Power Delivery Capabilities. * @pd_set: Set USB Power Delivery Capabilities. + * @default_usb_mode_set: USB Mode to be used by default with Enter_USB Message + * @enter_usb_mode: Change the active USB Mode */ struct typec_operations { int (*try_role)(struct typec_port *port, int role); @@ -250,6 +265,8 @@ struct typec_operations { enum typec_port_type type); struct usb_power_delivery **(*pd_get)(struct typec_port *port); int (*pd_set)(struct typec_port *port, struct usb_power_delivery *pd); + int (*default_usb_mode_set)(struct typec_port *port, enum usb_mode mode); + int (*enter_usb_mode)(struct typec_port *port, enum usb_mode mode); }; enum usb_pd_svdm_ver { @@ -267,6 +284,7 @@ enum usb_pd_svdm_ver { * @svdm_version: USB PD Structured VDM version if supported * @prefer_role: Initial role preference (DRP ports). 
* @accessory: Supported Accessory Modes + * @usb_capability: Supported USB Modes * @fwnode: Optional fwnode of the port * @driver_data: Private pointer for driver specific info * @pd: Optional USB Power Delivery Support @@ -283,6 +301,7 @@ struct typec_capability { int prefer_role; enum typec_accessory accessory[TYPEC_MAX_ACCESSORY]; unsigned int orientation_aware:1; + u8 usb_capability; struct fwnode_handle *fwnode; void *driver_data; @@ -350,6 +369,9 @@ int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_ int typec_partner_set_usb_power_delivery(struct typec_partner *partner, struct usb_power_delivery *pd); +void typec_partner_set_usb_mode(struct typec_partner *partner, enum usb_mode usb_mode); +void typec_port_set_usb_mode(struct typec_port *port, enum usb_mode mode); + /** * struct typec_connector - Representation of Type-C port for external drivers * @attach: notification about device removal diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h index 88d96095bcb1..bce95153e5a6 100644 --- a/include/linux/usb/uvc.h +++ b/include/linux/usb/uvc.h @@ -118,6 +118,9 @@ #define UVC_GUID_FORMAT_Y12I \ { 'Y', '1', '2', 'I', 0x00, 0x00, 0x10, 0x00, \ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_Y16I \ + { 'Y', '1', '6', 'I', 0x00, 0x00, 0x10, 0x00, \ + 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} #define UVC_GUID_FORMAT_Z16 \ { 'Z', '1', '6', ' ', 0x00, 0x00, 0x10, 0x00, \ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} @@ -140,6 +143,9 @@ #define UVC_GUID_FORMAT_D3DFMT_L8 \ {0x32, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, \ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} +#define UVC_GUID_FORMAT_D3DFMT_R5G6B5 \ + {0x7b, 0xeb, 0x36, 0xe4, 0x4f, 0x52, 0xce, 0x11, \ + 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70} #define UVC_GUID_FORMAT_KSMEDIA_L8_IR \ {0x32, 0x00, 0x00, 0x00, 0x02, 0x00, 0x10, 0x00, \ 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71} diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h index 6bb460c3e818..825487fb66fa 100644 --- a/include/linux/util_macros.h +++ b/include/linux/util_macros.h @@ -4,19 +4,6 @@ #include <linux/math.h> -#define __find_closest(x, a, as, op) \ -({ \ - typeof(as) __fc_i, __fc_as = (as) - 1; \ - typeof(x) __fc_x = (x); \ - typeof(*a) const *__fc_a = (a); \ - for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ - if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \ - __fc_a[__fc_i + 1], 2)) \ - break; \ - } \ - (__fc_i); \ -}) - /** * find_closest - locate the closest element in a sorted array * @x: The reference value. * @a: The array in which to look for the closest element. * @as: Size of 'a'. * * Returns the index of the element closest to 'x'. + * Note: If the array contains negative numbers (or a mix of negative and + * positive numbers), be sure that 'x' is of a signed type to get correct + * results. */ -#define find_closest(x, a, as) __find_closest(x, a, as, <=) +#define find_closest(x, a, as) \ +({ \ + typeof(as) __fc_i, __fc_as = (as) - 1; \ + long __fc_mid_x, __fc_x = (x); \ + long __fc_left, __fc_right; \ + typeof(*a) const *__fc_a = (a); \ + for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \ + __fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i + 1]) / 2; \ + if (__fc_x <= __fc_mid_x) { \ + __fc_left = __fc_x - __fc_a[__fc_i]; \ + __fc_right = __fc_a[__fc_i + 1] - __fc_x; \ + if (__fc_right < __fc_left) \ + __fc_i++; \ + break; \ + } \ + } \ + (__fc_i); \ +}) /** * find_closest_descending - locate the closest element in a sorted array * @x: The reference value. * @a: The array in which to look for the closest element. * @as: Size of 'a'.
* * Similar to find_closest() but 'a' is expected to be sorted in descending - * order. + * order. The iteration is done in reverse order, so that the comparison + * of '__fc_right' & '__fc_left' also works for unsigned numbers. */ -#define find_closest_descending(x, a, as) __find_closest(x, a, as, >=) +#define find_closest_descending(x, a, as) \ +({ \ + typeof(as) __fc_i, __fc_as = (as) - 1; \ + long __fc_mid_x, __fc_x = (x); \ + long __fc_left, __fc_right; \ + typeof(*a) const *__fc_a = (a); \ + for (__fc_i = __fc_as; __fc_i >= 1; __fc_i--) { \ + __fc_mid_x = (__fc_a[__fc_i] + __fc_a[__fc_i - 1]) / 2; \ + if (__fc_x <= __fc_mid_x) { \ + __fc_left = __fc_x - __fc_a[__fc_i]; \ + __fc_right = __fc_a[__fc_i - 1] - __fc_x; \ + if (__fc_right < __fc_left) \ + __fc_i--; \ + break; \ + } \ + } \ + (__fc_i); \ +}) /** * is_insidevar - check if the @ptr points inside the @var memory range. diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 306137a15d07..57cc4b07fd17 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -56,6 +56,17 @@ int virtqueue_add_inbuf_ctx(struct virtqueue *vq, void *ctx, gfp_t gfp); +int virtqueue_add_inbuf_premapped(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + void *ctx, + gfp_t gfp); + +int virtqueue_add_outbuf_premapped(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + gfp_t gfp); + int virtqueue_add_sgs(struct virtqueue *vq, struct scatterlist *sgs[], unsigned int out_sgs, @@ -82,8 +93,6 @@ bool virtqueue_enable_cb(struct virtqueue *vq); unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq); -int virtqueue_set_dma_premapped(struct virtqueue *_vq); - bool virtqueue_poll(struct virtqueue *vq, unsigned); bool virtqueue_enable_cb_delayed(struct virtqueue *vq); @@ -111,6 +120,7 @@ struct virtio_admin_cmd { struct scatterlist *data_sg; struct scatterlist *result_sg; struct completion completion; + u32 result_sg_size; int ret; }; diff --git a/include/linux/virtio_pci_admin.h b/include/linux/virtio_pci_admin.h index f4a100a0fe2e..dffc92c17ad2 100644 --- a/include/linux/virtio_pci_admin.h +++ b/include/linux/virtio_pci_admin.h @@ -20,4 +20,15 @@ int virtio_pci_admin_legacy_io_notify_info(struct pci_dev *pdev, u64 *bar_offset); #endif +bool virtio_pci_admin_has_dev_parts(struct pci_dev *pdev); +int virtio_pci_admin_mode_set(struct pci_dev *pdev, u8 mode); +int virtio_pci_admin_obj_create(struct pci_dev *pdev, u16 obj_type, u8 operation_type, + u32 *obj_id); +int virtio_pci_admin_obj_destroy(struct pci_dev *pdev, u16 obj_type, u32 id); +int virtio_pci_admin_dev_parts_metadata_get(struct pci_dev *pdev, u16 obj_type, + u32 id, u8 metadata_type, u32 *out); +int virtio_pci_admin_dev_parts_get(struct pci_dev *pdev, u16 obj_type, u32 id, + u8 get_type, struct scatterlist *res_sg, u32 *res_size); +int virtio_pci_admin_dev_parts_set(struct pci_dev *pdev, struct scatterlist *data_sg); + #endif /* _LINUX_VIRTIO_PCI_ADMIN_H */ diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index ad2ce7a6ab7a..31e9ffd936e3 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -134,12 +134,6 @@ extern void vm_unmap_ram(const void *mem, unsigned int count); extern void *vm_map_ram(struct page **pages, unsigned int count, int node); extern void vm_unmap_aliases(void); -#ifdef CONFIG_MMU -extern unsigned long vmalloc_nr_pages(void); -#else -static inline unsigned long vmalloc_nr_pages(void) { return 0; } -#endif - extern void *vmalloc_noprof(unsigned long size) __alloc_size(1); 
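Referring back to the reworked find_closest() above: the '__fc_left'/'__fc_right' adjustment after the midpoint test is what makes the result exact for signed values. A minimal sketch, with foo_rates and foo_pick_rate() invented purely for illustration:

static const int foo_rates[] = { -400, -100, 0, 100, 400 };

static int foo_pick_rate(int requested)	/* signed, as the note above requires */
{
	/*
	 * For requested == -240: the midpoint at index 0 is -250, so the
	 * scan moves on; at index 1 the midpoint is -50, and since 0 is
	 * farther from -240 than -100 is, the index stays at 1.
	 */
	int idx = find_closest(requested, foo_rates, ARRAY_SIZE(foo_rates));

	return foo_rates[idx];	/* -100 for requested == -240 */
}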
#define vmalloc(...) alloc_hooks(vmalloc_noprof(__VA_ARGS__)) @@ -208,6 +202,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma, extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff); +int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot, + struct page **pages, unsigned int page_shift); + /* * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings() @@ -266,12 +263,29 @@ static inline bool is_vm_area_hugepages(const void *addr) #endif } +/* for /proc/kcore */ +long vread_iter(struct iov_iter *iter, const char *addr, size_t count); + +/* + * Internals. Don't use.. + */ +__init void vm_area_add_early(struct vm_struct *vm); +__init void vm_area_register_early(struct vm_struct *vm, size_t align); + +int register_vmap_purge_notifier(struct notifier_block *nb); +int unregister_vmap_purge_notifier(struct notifier_block *nb); + #ifdef CONFIG_MMU +#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) + +unsigned long vmalloc_nr_pages(void); + int vm_area_map_pages(struct vm_struct *area, unsigned long start, unsigned long end, struct page **pages); void vm_area_unmap_pages(struct vm_struct *area, unsigned long start, unsigned long end); void vunmap_range(unsigned long addr, unsigned long end); + static inline void set_vm_flush_reset_perms(void *addr) { struct vm_struct *vm = find_vm_area(addr); @@ -279,24 +293,14 @@ static inline void set_vm_flush_reset_perms(void *addr) if (vm) vm->flags |= VM_FLUSH_RESET_PERMS; } +#else /* !CONFIG_MMU */ +#define VMALLOC_TOTAL 0UL -#else -static inline void set_vm_flush_reset_perms(void *addr) -{ -} -#endif - -/* for /proc/kcore */ -extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count); - -/* - * Internals. Don't use.. 
- */ -extern __init void vm_area_add_early(struct vm_struct *vm); -extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); +static inline unsigned long vmalloc_nr_pages(void) { return 0; } +static inline void set_vm_flush_reset_perms(void *addr) {} +#endif /* CONFIG_MMU */ -#ifdef CONFIG_SMP -# ifdef CONFIG_MMU +#if defined(CONFIG_MMU) && defined(CONFIG_SMP) struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, const size_t *sizes, int nr_vms, size_t align); @@ -311,22 +315,9 @@ pcpu_get_vm_areas(const unsigned long *offsets, return NULL; } -static inline void -pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) -{ -} -# endif -#endif - -#ifdef CONFIG_MMU -#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) -#else -#define VMALLOC_TOTAL 0UL +static inline void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) {} #endif -int register_vmap_purge_notifier(struct notifier_block *nb); -int unregister_vmap_purge_notifier(struct notifier_block *nb); - #if defined(CONFIG_MMU) && defined(CONFIG_PRINTK) bool vmalloc_dump_obj(void *object); #else diff --git a/include/linux/vt_buffer.h b/include/linux/vt_buffer.h index 919d999a8c1d..b6eeb8cb6070 100644 --- a/include/linux/vt_buffer.h +++ b/include/linux/vt_buffer.h @@ -28,45 +28,21 @@ #ifndef VT_BUF_HAVE_MEMSETW static inline void scr_memsetw(u16 *s, u16 c, unsigned int count) { -#ifdef VT_BUF_HAVE_RW - count /= 2; - while (count--) - scr_writew(c, s++); -#else memset16(s, c, count / 2); -#endif } #endif #ifndef VT_BUF_HAVE_MEMCPYW static inline void scr_memcpyw(u16 *d, const u16 *s, unsigned int count) { -#ifdef VT_BUF_HAVE_RW - count /= 2; - while (count--) - scr_writew(scr_readw(s++), d++); -#else memcpy(d, s, count); -#endif } #endif #ifndef VT_BUF_HAVE_MEMMOVEW static inline void scr_memmovew(u16 *d, const u16 *s, unsigned int count) { -#ifdef VT_BUF_HAVE_RW - if (d < s) - scr_memcpyw(d, s, count); - else { - count /= 2; - d += count; - s += count; - while (count--) - scr_writew(scr_readw(--s), --d); - } -#else memmove(d, s, count); -#endif } #endif diff --git a/include/linux/wait.h b/include/linux/wait.h index 8aa3372f21a0..6d90ad974408 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -221,6 +221,7 @@ void __wake_up_pollfree(struct wait_queue_head *wq_head); #define wake_up_all(x) __wake_up(x, TASK_NORMAL, 0, NULL) #define wake_up_locked(x) __wake_up_locked((x), TASK_NORMAL, 1) #define wake_up_all_locked(x) __wake_up_locked((x), TASK_NORMAL, 0) +#define wake_up_sync(x) __wake_up_sync(x, TASK_NORMAL) #define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL) #define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL) @@ -541,8 +542,8 @@ do { \ int __ret = 0; \ struct hrtimer_sleeper __t; \ \ - hrtimer_init_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ - HRTIMER_MODE_REL); \ + hrtimer_setup_sleeper_on_stack(&__t, CLOCK_MONOTONIC, \ + HRTIMER_MODE_REL); \ if ((timeout) != KTIME_MAX) { \ hrtimer_set_expires_range_ns(&__t.timer, timeout, \ current->timer_slack_ns); \ diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h index 7725b7579b78..9e29d79fc790 100644 --- a/include/linux/wait_bit.h +++ b/include/linux/wait_bit.h @@ -8,7 +8,7 @@ #include <linux/wait.h> struct wait_bit_key { - void *flags; + unsigned long *flags; int bit_nr; unsigned long timeout; }; @@ -23,14 +23,14 @@ struct wait_bit_queue_entry { typedef int wait_bit_action_f(struct wait_bit_key *key, int mode); -void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit); 
+void __wake_up_bit(struct wait_queue_head *wq_head, unsigned long *word, int bit); int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode); -void wake_up_bit(void *word, int bit); -int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode); -int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); -int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode); -struct wait_queue_head *bit_waitqueue(void *word, int bit); +void wake_up_bit(unsigned long *word, int bit); +int out_of_line_wait_on_bit(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode); +int out_of_line_wait_on_bit_timeout(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout); +int out_of_line_wait_on_bit_lock(unsigned long *word, int, wait_bit_action_f *action, unsigned int mode); +struct wait_queue_head *bit_waitqueue(unsigned long *word, int bit); extern void __init wait_bit_init(void); int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key); @@ -49,23 +49,24 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync extern int bit_wait(struct wait_bit_key *key, int mode); extern int bit_wait_io(struct wait_bit_key *key, int mode); extern int bit_wait_timeout(struct wait_bit_key *key, int mode); -extern int bit_wait_io_timeout(struct wait_bit_key *key, int mode); /** * wait_on_bit - wait for a bit to be cleared - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * @mode: the task state to sleep in * - * There is a standard hashed waitqueue table for generic use. This - * is the part of the hashtable's accessor API that waits on a bit. - * For instance, if one were to have waiters on a bitflag, one would - * call wait_on_bit() in threads waiting for the bit to clear. - * One uses wait_on_bit() where one is waiting for the bit to clear, - * but has no intention of setting it. - * Returned value will be zero if the bit was cleared, or non-zero - * if the process received a signal and the mode permitted wakeup - * on that signal. + * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP()) + * to be cleared. The clearing of the bit must be signalled with + * wake_up_bit(), often as clear_and_wake_up_bit(). + * + * The process will wait on a waitqueue selected by hash from a shared + * pool. It will only be woken on a wake_up for the target bit, even + * if other processes on the same queue are waiting for other bits. + * + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or %-EINTR if the process received a + * signal and the mode permitted wake up on that signal. 
*/ static inline int wait_on_bit(unsigned long *word, int bit, unsigned mode) @@ -80,17 +81,20 @@ wait_on_bit(unsigned long *word, int bit, unsigned mode) /** * wait_on_bit_io - wait for a bit to be cleared - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared. This is similar to wait_on_bit(), but calls - * io_schedule() instead of schedule() for the actual waiting. + * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP()) + * to be cleared. The clearing of the bit must be signalled with + * wake_up_bit(), often as clear_and_wake_up_bit(). * - * Returned value will be zero if the bit was cleared, or non-zero - * if the process received a signal and the mode permitted wakeup - * on that signal. + * This is similar to wait_on_bit(), but calls io_schedule() instead of + * schedule() for the actual waiting. + * + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or %-EINTR if the process received a + * signal and the mode permitted wake up on that signal. */ static inline int wait_on_bit_io(unsigned long *word, int bit, unsigned mode) @@ -104,19 +108,24 @@ wait_on_bit_io(unsigned long *word, int bit, unsigned mode) } /** - * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_timeout - wait for a bit to be cleared or a timeout to elapse + * @word: the address containing the bit being waited on + * @bit: the bit at that address being waited on * @mode: the task state to sleep in * @timeout: timeout, in jiffies * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared. This is similar to wait_on_bit(), except also takes a - * timeout parameter. + * Wait for the given bit in an unsigned long or bitmap (see + * DECLARE_BITMAP()) to be cleared, or for a timeout to expire. The + * clearing of the bit must be signalled with wake_up_bit(), often as + * clear_and_wake_up_bit(). + * + * This is similar to wait_on_bit(), except it also takes a timeout + * parameter. * - * Returned value will be zero if the bit was cleared before the - * @timeout elapsed, or non-zero if the @timeout elapsed or process - * received a signal and the mode permitted wakeup on that signal. + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or %-EINTR if the process received a + * signal and the mode permitted wake up on that signal, or %-EAGAIN if the + * timeout elapsed. */ static inline int wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, @@ -132,19 +141,21 @@ wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode, /** * wait_on_bit_action - wait for a bit to be cleared - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * @word: the address containing the bit waited on + * @bit: the bit at that address being waited on * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared, and allow the waiting action to be specified. 
- * This is like wait_on_bit() but allows fine control of how the waiting - * is done. + * Wait for the given bit in an unsigned long or bitmap (see DECLARE_BITMAP()) + * to be cleared. The clearing of the bit must be signalled with + * wake_up_bit(), often as clear_and_wake_up_bit(). + * + * This is similar to wait_on_bit(), but calls @action() instead of + * schedule() for the actual waiting. * - * Returned value will be zero if the bit was cleared, or non-zero - * if the process received a signal and the mode permitted wakeup - * on that signal. + * Returned value will be zero if the bit was cleared in which case the + * call has ACQUIRE semantics, or the error code returned by @action if + * that call returned non-zero. */ static inline int wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action, @@ -157,23 +168,22 @@ wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action, } /** - * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_lock - wait for a bit to be cleared, then set it + * @word: the address containing the bit being waited on + * @bit: the bit of the word being waited on and set * @mode: the task state to sleep in * - * There is a standard hashed waitqueue table for generic use. This - * is the part of the hashtable's accessor API that waits on a bit - * when one intends to set it, for instance, trying to lock bitflags. - * For instance, if one were to have waiters trying to set bitflag - * and waiting for it to clear before setting it, one would call - * wait_on_bit() in threads waiting to be able to set the bit. - * One uses wait_on_bit_lock() where one is waiting for the bit to - * clear with the intention of setting it, and when done, clearing it. + * Wait for the given bit in an unsigned long or bitmap (see + * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be + * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As + * soon as it is clear, atomically set it and return. * - * Returns zero if the bit was (eventually) found to be clear and was - * set. Returns non-zero if a signal was delivered to the process and - * the @mode allows that signal to wake the process. + * This is similar to wait_on_bit(), but sets the bit before returning. + * + * Returned value will be zero if the bit was successfully set in which + * case the call has the same memory sequencing semantics as + * test_and_set_bit(), or %-EINTR if the process received a signal and + * the mode permitted wake up on that signal. */ static inline int wait_on_bit_lock(unsigned long *word, int bit, unsigned mode) @@ -185,15 +195,18 @@ wait_on_bit_lock(unsigned long *word, int bit, unsigned mode) } /** - * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_lock_io - wait for a bit to be cleared, then set it + * @word: the address containing the bit being waited on + * @bit: the bit of the word being waited on and set * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared and then to atomically set it. This is similar - * to wait_on_bit(), but calls io_schedule() instead of schedule() - * for the actual waiting.
+ * Wait for the given bit in an unsigned long or bitmap (see + * DECLARE_BITMAP()) to be cleared. The clearing of the bit must be + * signalled with wake_up_bit(), often as clear_and_wake_up_bit(). As + * soon as it is clear, atomically set it and return. + * + * This is similar to wait_on_bit_lock(), but calls io_schedule() instead + * of schedule(). * * Returns zero if the bit was (eventually) found to be clear and was * set. Returns non-zero if a signal was delivered to the process and @@ -209,21 +222,19 @@ wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode) } /** - * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it - * @word: the word being waited on, a kernel virtual address - * @bit: the bit of the word being waited on + * wait_on_bit_lock_action - wait for a bit to be cleared, then set it + * @word: the address containing the bit being waited on + * @bit: the bit of the word being waited on and set * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * - * Use the standard hashed waitqueue table to wait for a bit - * to be cleared and then to set it, and allow the waiting action - * to be specified. - * This is like wait_on_bit() but allows fine control of how the waiting - * is done. + * This is similar to wait_on_bit_lock(), but calls @action() instead of + * schedule() for the actual waiting. * - * Returns zero if the bit was (eventually) found to be clear and was - * set. Returns non-zero if a signal was delivered to the process and - * the @mode allows that signal to wake the process. + * Returned value will be zero if the bit was successfully set in which + * case the call has the same memory sequencing semantics as + * test_and_set_bit(), or the error code returned by @action if that + * call returned non-zero. */ static inline int wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action, @@ -269,7 +280,26 @@ __out: __ret; \ #define __wait_var_event(var, condition) \ ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ schedule()) +#define __wait_var_event_io(var, condition) \ + ___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ + io_schedule()) +/** + * wait_var_event - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for a @condition to be true, only re-checking when a wake up is + * received for the given @var (an arbitrary kernel address which need + * not be directly related to the given condition, but usually is). + * + * The process will wait on a waitqueue selected by hash from a shared + * pool. It will only be woken on a wake_up for the given address. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ #define wait_var_event(var, condition) \ do { \ might_sleep(); \ if (condition) \ break; \ __wait_var_event(var, condition); \ } while (0) +/** + * wait_var_event_io - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for an IO related @condition to be true, only re-checking when a + * wake up is received for the given @var (an arbitrary kernel address + * which need not be directly related to the given condition, but + * usually is).
+ * + * The process will wait on a waitqueue selected by hash from a shared + * pool. It will only be woken on a wake_up for the given address. + * + * This is similar to wait_var_event(), but calls io_schedule() instead + * of schedule(). + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ +#define wait_var_event_io(var, condition) \ +do { \ + might_sleep(); \ + if (condition) \ + break; \ + __wait_var_event_io(var, condition); \ +} while (0) + #define __wait_var_event_killable(var, condition) \ ___wait_var_event(var, condition, TASK_KILLABLE, 0, 0, \ schedule()) +/** + * wait_var_event_killable - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for a @condition to be true or a fatal signal to be received, + * only re-checking the condition when a wake up is received for the given + * @var (an arbitrary kernel address which need not be directly related + * to the given condition, but usually is). + * + * This is similar to wait_var_event() but returns a value which is + * 0 if the condition became true, or %-ERESTARTSYS if a fatal signal + * was received. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ #define wait_var_event_killable(var, condition) \ ({ \ int __ret = 0; \ @@ -296,6 +372,26 @@ do { \ TASK_UNINTERRUPTIBLE, 0, timeout, \ __ret = schedule_timeout(__ret)) +/** + * wait_var_event_timeout - wait for a variable to be updated or a timeout to expire + * @var: the address of variable being waited on + * @condition: the condition to wait for + * @timeout: maximum time to wait in jiffies + * + * Wait for a @condition to be true or a timeout to expire, only + * re-checking the condition when a wake up is received for the given + * @var (an arbitrary kernel address which need not be directly related + * to the given condition, but usually is). + * + * This is similar to wait_var_event() but returns a value which is 0 if + * the timeout expired and the condition was still false, or the + * remaining time left in the timeout (but at least 1) if the condition + * was found to be true. + * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ #define wait_var_event_timeout(var, condition, timeout) \ ({ \ long __ret = timeout; \ @@ -309,6 +405,23 @@ do { \ ___wait_var_event(var, condition, TASK_INTERRUPTIBLE, 0, 0, \ schedule()) +/** + * wait_var_event_interruptible - wait for a variable to be updated and notified + * @var: the address of variable being waited on + * @condition: the condition to wait for + * + * Wait for a @condition to be true or a signal to be received, only + * re-checking the condition when a wake up is received for the given + * @var (an arbitrary kernel address which need not be directly related + * to the given condition, but usually is). + * + * This is similar to wait_var_event() but returns a value which is 0 if + * the condition became true, or %-ERESTARTSYS if a signal was received.
+ * + * The condition should normally use smp_load_acquire() or a similarly + * ordered access to ensure that any changes to memory made before the + * condition became true will be visible after the wait completes. + */ #define wait_var_event_interruptible(var, condition) \ ({ \ int __ret = 0; \ @@ -319,15 +432,122 @@ do { \ }) /** - * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit + * wait_var_event_any_lock - wait for a variable to be updated under a lock + * @var: the address of the variable being waited on + * @condition: condition to wait for + * @lock: the object that is locked to protect updates to the variable + * @type: prefix on lock and unlock operations + * @state: waiting state, %TASK_UNINTERRUPTIBLE etc. + * + * Wait for a condition which can only be reliably tested while holding + * a lock. The variables assessed in the condition will normally be updated + * under the same lock, and the wake up should be signalled with + * wake_up_var_locked() under the same lock. + * + * This is similar to wait_var_event(), but assumes a lock is held + * while calling this function and while updating the variable. * + * This must be called while the given lock is held and the lock will be + * dropped when schedule() is called to wait for a wake up, and will be + * reacquired before testing the condition again. The functions used to + * unlock and lock the object are constructed by appending _unlock and _lock + * to @type. + * + * Returns %-ERESTARTSYS if a signal arrives which is allowed to interrupt + * the wait according to @state. + */ +#define wait_var_event_any_lock(var, condition, lock, type, state) \ +({ \ + int __ret = 0; \ + if (!(condition)) \ + __ret = ___wait_var_event(var, condition, state, 0, 0, \ + type ## _unlock(lock); \ + schedule(); \ + type ## _lock(lock)); \ + __ret; \ +}) + +/** + * wait_var_event_spinlock - wait for a variable to be updated under a spinlock + * @var: the address of the variable being waited on + * @condition: condition to wait for + * @lock: the spinlock which protects updates to the variable + * + * Wait for a condition which can only be reliably tested while holding + * a spinlock. The variables assessed in the condition will normally be updated + * under the same spinlock, and the wake up should be signalled with + * wake_up_var_locked() under the same spinlock. + * + * This is similar to wait_var_event(), but assumes a spinlock is held + * while calling this function and while updating the variable. + * + * This must be called while the given lock is held and the lock will be + * dropped when schedule() is called to wait for a wake up, and will be + * reacquired before testing the condition again. + */ +#define wait_var_event_spinlock(var, condition, lock) \ + wait_var_event_any_lock(var, condition, lock, spin, TASK_UNINTERRUPTIBLE) + +/** + * wait_var_event_mutex - wait for a variable to be updated under a mutex + * @var: the address of the variable being waited on + * @condition: condition to wait for + * @mutex: the mutex which protects updates to the variable + * + * Wait for a condition which can only be reliably tested while holding + * a mutex. The variables assessed in the condition will normally be + * updated under the same mutex, and the wake up should be signalled + * with wake_up_var_locked() under the same mutex. + * + * This is similar to wait_var_event(), but assumes a mutex is held + * while calling this function and while updating the variable.
+/**
+ * wait_var_event_mutex - wait for a variable to be updated under a mutex
+ * @var: the address of the variable being waited on
+ * @condition: condition to wait for
+ * @lock: the mutex which protects updates to the variable
+ *
+ * Wait for a condition which can only be reliably tested while holding
+ * a mutex. The variables assessed in the condition will normally be
+ * updated under the same mutex, and the wake up should be signalled
+ * with wake_up_var_locked() under the same mutex.
+ *
+ * This is similar to wait_var_event(), but assumes a mutex is held
+ * while calling this function and while updating the variable.
+ *
+ * This must be called while the given mutex is held and the mutex will be
+ * dropped when schedule() is called to wait for a wake up, and will be
+ * reacquired before testing the condition again.
+ */
+#define wait_var_event_mutex(var, condition, lock)			\
+	wait_var_event_any_lock(var, condition, lock, mutex, TASK_UNINTERRUPTIBLE)
+
+/**
+ * wake_up_var_protected - wake up waiters for a variable asserting that it is safe
+ * @var: the address of the variable being waited on
+ * @cond: the condition which affirms this is safe
+ *
+ * When waking waiters which use wait_var_event_any_lock() the waker must be
+ * holding the relevant lock to avoid races. This version of wake_up_var()
+ * asserts that the relevant lock is held and so no barrier is needed.
+ * The @cond is only tested when CONFIG_LOCKDEP is enabled.
+ */
+#define wake_up_var_protected(var, cond)				\
+do {									\
+	lockdep_assert(cond);						\
+	wake_up_var(var);						\
+} while (0)
+
+/**
+ * wake_up_var_locked - wake up waiters for a variable while holding a spinlock or mutex
+ * @var: the address of the variable being waited on
+ * @lock: the spinlock or mutex that protects the variable
+ *
+ * Send a wake up for the given variable which should be waited for with
+ * wait_var_event_spinlock() or wait_var_event_mutex(). Unlike wake_up_var(),
+ * no extra barriers are needed as the locking provides sufficient sequencing.
+ */
+#define wake_up_var_locked(var, lock)					\
+	wake_up_var_protected(var, lockdep_is_held(lock))
+
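The bit-granularity counterparts of these helpers follow next; a sketch of the classic busy-bit protocol they support (FOO_BUSY and the foo_* functions are invented for illustration):

	#include <linux/sched.h>
	#include <linux/wait_bit.h>

	#define FOO_BUSY	0	/* bit number within foo.flags */

	struct foo {
		unsigned long flags;
	};

	/* waiter: sleep until FOO_BUSY is clear; -EINTR on a fatal signal */
	static int foo_wait_idle(struct foo *f)
	{
		return wait_on_bit(&f->flags, FOO_BUSY, TASK_KILLABLE);
	}

	/* owner: clear the bit with RELEASE semantics and wake wait_on_bit()
	 * sleepers, via the clear_and_wake_up_bit() helper re-documented
	 * just below
	 */
	static void foo_done(struct foo *f)
	{
		clear_and_wake_up_bit(FOO_BUSY, &f->flags);
	}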
+/**
+ * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
  * @bit: the bit of the word being waited on
- * @word: the word being waited on, a kernel virtual address
+ * @word: the address containing the bit being waited on
  *
- * You can use this helper if bitflags are manipulated atomically rather than
- * non-atomically under a lock.
+ * The designated bit is cleared and any tasks waiting in wait_on_bit()
+ * or similar will be woken. This call has RELEASE semantics so that
+ * any changes to memory made before this call are guaranteed to be visible
+ * after the corresponding wait_on_bit() completes.
  */
-static inline void clear_and_wake_up_bit(int bit, void *word)
+static inline void clear_and_wake_up_bit(int bit, unsigned long *word)
 {
 	clear_bit_unlock(bit, word);
 	/* See wake_up_bit() for which memory barrier you need to use. */
@@ -335,4 +555,64 @@ static inline void clear_and_wake_up_bit(int bit, unsigned long *word)
 	wake_up_bit(word, bit);
 }
 
+/**
+ * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit
+ * @bit: the bit of the word being waited on
+ * @word: the address of memory containing that bit
+ *
+ * If the bit is set and can be atomically cleared, any tasks waiting in
+ * wait_on_bit() or similar will be woken. This call has the same
+ * complete ordering semantics as test_and_clear_bit(). Any changes to
+ * memory made before this call are guaranteed to be visible after the
+ * corresponding wait_on_bit() completes.
+ *
+ * Returns %true if the bit was successfully cleared and the wake up was sent.
+ */
+static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word)
+{
+	if (!test_and_clear_bit(bit, word))
+		return false;
+	/* no extra barrier required */
+	wake_up_bit(word, bit);
+	return true;
+}
+
+/**
+ * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters
+ * @var: the variable to dec and test
+ *
+ * Decrements the atomic variable and if it reaches zero, sends a wake_up to
+ * any processes waiting on the variable.
+ *
+ * This function has the same complete ordering semantics as
+ * atomic_dec_and_test().
+ *
+ * Returns %true if the variable reaches zero and the wake up was sent.
+ */
+static inline bool atomic_dec_and_wake_up(atomic_t *var)
+{
+	if (!atomic_dec_and_test(var))
+		return false;
+	/* No extra barrier required */
+	wake_up_var(var);
+	return true;
+}
+
+/**
+ * store_release_wake_up - update a variable and send a wake_up
+ * @var: the address of the variable to be updated and woken
+ * @val: the value to store in the variable
+ *
+ * Store the given value in the variable and send a wake up to any tasks
+ * waiting on the variable. All necessary barriers are included to ensure
+ * the task calling wait_var_event() sees the new value and all values
+ * written to memory before this call.
+ */
+#define store_release_wake_up(var, val)					\
+do {									\
+	smp_store_release(var, val);					\
+	smp_mb();							\
+	wake_up_var(var);						\
+} while (0)
+
 #endif /* _LINUX_WAIT_BIT_H */
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index e6e34d74dda0..03e5d3fe226d 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -21,8 +21,7 @@ struct compat_iw_point {
 	__u16 length;
 	__u16 flags;
 };
-#endif
-#ifdef CONFIG_COMPAT
+
 struct __compat_iw_event {
 	__u16 len;			/* Real length of this stuff */
 	__u16 cmd;			/* Wireless IOCTL */
@@ -49,5 +48,5 @@ struct __compat_iw_event {
 #define IW_EV_COMPAT_POINT_LEN	\
 	(IW_EV_COMPAT_LCP_LEN + sizeof(struct compat_iw_point) - \
 	 IW_EV_COMPAT_POINT_OFF)
-#endif
+#endif /* CONFIG_COMPAT */
 #endif	/* _LINUX_WIRELESS_H */
diff --git a/include/linux/wmi.h b/include/linux/wmi.h
index 3275470b5531..10751c8e5e6a 100644
--- a/include/linux/wmi.h
+++ b/include/linux/wmi.h
@@ -34,7 +34,7 @@ struct wmi_device {
  *
  * Cast a struct device to a struct wmi_device.
  */
-#define to_wmi_device(device)	container_of(device, struct wmi_device, dev)
+#define to_wmi_device(device)	container_of_const(device, struct wmi_device, dev)
 
 extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev,
 					  u8 instance, u32 method_id,
@@ -56,6 +56,7 @@ u8 wmidev_instance_count(struct wmi_device *wdev);
  * @no_singleton: Driver can be instantiated multiple times
  * @probe: Callback for device binding
  * @remove: Callback for device unbinding
+ * @shutdown: Callback for device shutdown
  * @notify: Callback for receiving WMI events
  *
  * This represents WMI drivers which handle WMI devices.
@@ -68,9 +69,18 @@ struct wmi_driver {
 
 	int (*probe)(struct wmi_device *wdev, const void *context);
 	void (*remove)(struct wmi_device *wdev);
+	void (*shutdown)(struct wmi_device *wdev);
 	void (*notify)(struct wmi_device *device, union acpi_object *data);
 };
 
+/**
+ * to_wmi_driver() - Helper macro to cast a driver to a wmi_driver
+ * @drv: driver struct
+ *
+ * Cast a struct device_driver to a struct wmi_driver.
+ */
+#define to_wmi_driver(drv)	container_of_const(drv, struct wmi_driver, driver)
+
 extern int __must_check __wmi_driver_register(struct wmi_driver *driver,
 					      struct module *owner);
 extern void wmi_driver_unregister(struct wmi_driver *driver);
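The wmi.h hunk above adds a shutdown callback and a to_wmi_driver() cast helper. A minimal driver skeleton wiring in the new callback might look like the following sketch (the foo_* names and the GUID are hypothetical):

	#include <linux/mod_devicetable.h>
	#include <linux/wmi.h>

	static int foo_wmi_probe(struct wmi_device *wdev, const void *context)
	{
		return 0;
	}

	/* called at system shutdown/reboot, not on ordinary unbind */
	static void foo_wmi_shutdown(struct wmi_device *wdev)
	{
		/* quiesce the firmware interface here */
	}

	static const struct wmi_device_id foo_wmi_id_table[] = {
		{ .guid_string = "01234567-89AB-CDEF-0123-456789ABCDEF" },
		{ }
	};
	MODULE_DEVICE_TABLE(wmi, foo_wmi_id_table);

	static struct wmi_driver foo_wmi_driver = {
		.driver = {
			.name = "foo-wmi",
		},
		.id_table = foo_wmi_id_table,
		.probe = foo_wmi_probe,
		.shutdown = foo_wmi_shutdown,
	};
	module_wmi_driver(foo_wmi_driver);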
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 59c2695e12e7..b0dc957c3e56 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -412,7 +412,7 @@ enum wq_flags {
 };
 
 enum wq_consts {
-	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
+	WQ_MAX_ACTIVE		= 2048,	  /* I like 2048, better ideas? */
 	WQ_UNBOUND_MAX_ACTIVE	= WQ_MAX_ACTIVE,
 	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index d6db822e4bb3..d11b903c2edb 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -213,11 +213,8 @@ static inline void wait_on_inode(struct inode *inode)
 #include <linux/bio.h>
 
 void __inode_attach_wb(struct inode *inode, struct folio *folio);
-void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
-				 struct inode *inode)
-	__releases(&inode->i_lock);
 void wbc_detach_inode(struct writeback_control *wbc);
-void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
+void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
 			      size_t bytes);
 int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
 			   enum wb_reason reason, struct wb_completion *done);
@@ -254,22 +251,8 @@ static inline void inode_detach_wb(struct inode *inode)
 	}
 }
 
-/**
- * wbc_attach_fdatawrite_inode - associate wbc and inode for fdatawrite
- * @wbc: writeback_control of interest
- * @inode: target inode
- *
- * This function is to be used by __filemap_fdatawrite_range(), which is an
- * alternative entry point into writeback code, and first ensures @inode is
- * associated with a bdi_writeback and attaches it to @wbc.
- */
-static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
-					       struct inode *inode)
-{
-	spin_lock(&inode->i_lock);
-	inode_attach_wb(inode, NULL);
-	wbc_attach_and_unlock_inode(wbc, inode);
-}
+void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
+				 struct inode *inode);
 
 /**
  * wbc_init_bio - writeback specific initializtion of bio
@@ -303,13 +286,6 @@ static inline void inode_detach_wb(struct inode *inode)
 {
 }
 
-static inline void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
-					       struct inode *inode)
-	__releases(&inode->i_lock)
-{
-	spin_unlock(&inode->i_lock);
-}
-
 static inline void wbc_attach_fdatawrite_inode(struct writeback_control *wbc,
 					       struct inode *inode)
 {
@@ -324,7 +300,7 @@ static inline void wbc_init_bio(struct writeback_control *wbc, struct bio *bio)
 }
 
 static inline void wbc_account_cgroup_owner(struct writeback_control *wbc,
-					    struct page *page, size_t bytes)
+					    struct folio *folio, size_t bytes)
 {
 }
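The ww_mutex.h change that follows keeps a fake lockdep map locked for the duration of a ww transaction. As a refresher, the transaction pattern being instrumented looks roughly like this sketch (foo_ww_class and foo_transaction are invented names, not from the patch):

	#include <linux/minmax.h>
	#include <linux/ww_mutex.h>

	static DEFINE_WW_CLASS(foo_ww_class);

	static void foo_transaction(struct ww_mutex *a, struct ww_mutex *b)
	{
		struct ww_acquire_ctx ctx;

		ww_acquire_init(&ctx, &foo_ww_class);
		WARN_ON(ww_mutex_lock(a, &ctx));  /* first lock of the tx: no -EDEADLK */
		while (ww_mutex_lock(b, &ctx) == -EDEADLK) {
			/* wounded: back off, sleep on the contended lock, retry */
			ww_mutex_unlock(a);
			ww_mutex_lock_slow(b, &ctx);
			swap(a, b);		/* the slow-locked mutex is now held */
		}
		ww_acquire_done(&ctx);

		/* ... both locks held: do the work ... */

		ww_mutex_unlock(a);
		ww_mutex_unlock(b);
		ww_acquire_fini(&ctx);
	}

The new first_lock_dep_map is what lets lockdep verify that the first lock taken in such a transaction logically stays alive until ww_acquire_fini().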
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index bb763085479a..a401a2f31a77 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -65,6 +65,16 @@ struct ww_acquire_ctx {
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
+	/**
+	 * @first_lock_dep_map: fake lockdep_map for first locked ww_mutex.
+	 *
+	 * lockdep requires the lockdep_map for the first locked ww_mutex
+	 * in a ww transaction to remain in memory until all ww_mutexes of
+	 * the transaction have been unlocked. Ensure this by keeping a
+	 * fake locked ww_mutex lockdep map between ww_acquire_init() and
+	 * ww_acquire_fini().
+	 */
+	struct lockdep_map first_lock_dep_map;
 #endif
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
 	unsigned int deadlock_inject_interval;
@@ -146,7 +156,10 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
 	debug_check_no_locks_freed((void *)ctx, sizeof(*ctx));
 	lockdep_init_map(&ctx->dep_map, ww_class->acquire_name,
 			 &ww_class->acquire_key, 0);
+	lockdep_init_map(&ctx->first_lock_dep_map, ww_class->mutex_name,
+			 &ww_class->mutex_key, 0);
 	mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_);
+	mutex_acquire_nest(&ctx->first_lock_dep_map, 0, 0, &ctx->dep_map, _RET_IP_);
 #endif
 #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
 	ctx->deadlock_inject_interval = 1;
@@ -185,6 +198,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
 static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
 {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
+	mutex_release(&ctx->first_lock_dep_map, _THIS_IP_);
 	mutex_release(&ctx->dep_map, _THIS_IP_);
 #endif
 #ifdef DEBUG_WW_MUTEXES
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index 170fdee6339c..79c781875c09 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -17,6 +17,8 @@
  * @WWAN_PORT_FIREHOSE: XML based command protocol
  * @WWAN_PORT_XMMRPC: Control protocol for Intel XMM modems
  * @WWAN_PORT_FASTBOOT: Fastboot protocol control
+ * @WWAN_PORT_ADB: ADB protocol control
+ * @WWAN_PORT_MIPC: MTK MIPC diagnostic interface
  *
  * @WWAN_PORT_MAX: Highest supported port types
  * @WWAN_PORT_UNKNOWN: Special value to indicate an unknown port type
@@ -30,6 +32,8 @@ enum wwan_port_type {
 	WWAN_PORT_FIREHOSE,
 	WWAN_PORT_XMMRPC,
 	WWAN_PORT_FASTBOOT,
+	WWAN_PORT_ADB,
+	WWAN_PORT_MIPC,
 
 	/* Add new port types above this line */
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index d20051865800..86b0d47984a1 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -19,6 +19,10 @@
 #include <linux/user_namespace.h>
 #include <uapi/linux/xattr.h>
 
+/* List of all xattr_args "versions". */
+#define XATTR_ARGS_SIZE_VER0	16 /* sizeof first published struct */
+#define XATTR_ARGS_SIZE_LATEST	XATTR_ARGS_SIZE_VER0
+
 struct inode;
 struct dentry;
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 9cd1beef0654..d961ead91bf1 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -7,7 +7,7 @@
 
 struct lruvec;
 
-extern atomic_t zswap_stored_pages;
+extern atomic_long_t zswap_stored_pages;
 
 #ifdef CONFIG_ZSWAP
diff --git a/include/media/i2c/mt9p031.h b/include/media/i2c/mt9p031.h
deleted file mode 100644
index f933cd0be8e5..000000000000
--- a/include/media/i2c/mt9p031.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef MT9P031_H
-#define MT9P031_H
-
-struct v4l2_subdev;
-
-/*
- * struct mt9p031_platform_data - MT9P031 platform data
- * @ext_freq: Input clock frequency
- * @target_freq: Pixel clock frequency
- */
-struct mt9p031_platform_data {
-	unsigned int pixclk_pol:1;
-	int ext_freq;
-	int target_freq;
-};
-
-#endif
diff --git a/include/media/i2c/ths7303.h b/include/media/i2c/ths7303.h
index fee2818c558d..fc937025cdb4 100644
--- a/include/media/i2c/ths7303.h
+++ b/include/media/i2c/ths7303.h
@@ -5,7 +5,7 @@
  * Copyright 2013 Cisco Systems, Inc. and/or its affiliates.
* * Contributors: - * Hans Verkuil <hans.verkuil@cisco.com> + * Hans Verkuil <hansverk@cisco.com> * Lad, Prabhakar <prabhakar.lad@ti.com> * Martin Bugge <marbugge@cisco.com> */ diff --git a/include/media/media-entity.h b/include/media/media-entity.h index 0393b23129eb..64cf590b1134 100644 --- a/include/media/media-entity.h +++ b/include/media/media-entity.h @@ -1143,10 +1143,10 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph); /** * media_pipeline_start - Mark a pipeline as streaming - * @pad: Starting pad + * @origin: Starting pad * @pipe: Media pipeline to be assigned to all pads in the pipeline. * - * Mark all pads connected to a given pad through enabled links, either + * Mark all pads connected to pad @origin through enabled links, either * directly or indirectly, as streaming. The given pipeline object is assigned * to every pad in the pipeline and stored in the media_pad pipe field. * @@ -1155,17 +1155,17 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph); * pipeline pointer must be identical for all nested calls to * media_pipeline_start(). */ -__must_check int media_pipeline_start(struct media_pad *pad, +__must_check int media_pipeline_start(struct media_pad *origin, struct media_pipeline *pipe); /** * __media_pipeline_start - Mark a pipeline as streaming * - * @pad: Starting pad + * @origin: Starting pad * @pipe: Media pipeline to be assigned to all pads in the pipeline. * * ..note:: This is the non-locking version of media_pipeline_start() */ -__must_check int __media_pipeline_start(struct media_pad *pad, +__must_check int __media_pipeline_start(struct media_pad *origin, struct media_pipeline *pipe); /** diff --git a/include/media/media-request.h b/include/media/media-request.h index 3cd25a2717ce..d4ac557678a7 100644 --- a/include/media/media-request.h +++ b/include/media/media-request.h @@ -5,7 +5,7 @@ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. * Copyright (C) 2018 Intel Corporation * - * Author: Hans Verkuil <hans.verkuil@cisco.com> + * Author: Hans Verkuil <hansverk@cisco.com> * Author: Sakari Ailus <sakari.ailus@linux.intel.com> */ diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index d82dfdbf6e58..1b6222fab24e 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -62,6 +62,7 @@ struct v4l2_ioctl_callbacks; struct video_device; struct v4l2_device; struct v4l2_ctrl_handler; +struct dentry; /** * enum v4l2_video_device_flags - Flags used by &struct video_device @@ -539,6 +540,20 @@ static inline int video_is_registered(struct video_device *vdev) return test_bit(V4L2_FL_REGISTERED, &vdev->flags); } +/** + * v4l2_debugfs_root - returns the dentry of the top-level "v4l2" debugfs dir + * + * If this directory does not yet exist, then it will be created. + */ +#ifdef CONFIG_DEBUG_FS +struct dentry *v4l2_debugfs_root(void); +#else +static inline struct dentry *v4l2_debugfs_root(void) +{ + return NULL; +} +#endif + #if defined(CONFIG_MEDIA_CONTROLLER) /** diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h index 8fa963326bf6..ff07dc6b103c 100644 --- a/include/media/v4l2-dv-timings.h +++ b/include/media/v4l2-dv-timings.h @@ -8,6 +8,7 @@ #ifndef __V4L2_DV_TIMINGS_H #define __V4L2_DV_TIMINGS_H +#include <linux/debugfs.h> #include <linux/videodev2.h> /** @@ -146,15 +147,18 @@ void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix, * @polarities: the horizontal and vertical polarities (same as struct * v4l2_bt_timings polarities). 
* @interlaced: if this flag is true, it indicates interlaced format + * @cap: the v4l2_dv_timings_cap capabilities. * @fmt: the resulting timings. * * This function will attempt to detect if the given values correspond to a * valid CVT format. If so, then it will return true, and fmt will be filled * in with the found CVT timings. */ -bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync, - unsigned active_width, u32 polarities, bool interlaced, - struct v4l2_dv_timings *fmt); +bool v4l2_detect_cvt(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, unsigned int active_width, + u32 polarities, bool interlaced, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); /** * v4l2_detect_gtf - detect if the given timings follow the GTF standard @@ -170,15 +174,18 @@ bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync, * image height, so it has to be passed explicitly. Usually * the native screen aspect ratio is used for this. If it * is not filled in correctly, then 16:9 will be assumed. + * @cap: the v4l2_dv_timings_cap capabilities. * @fmt: the resulting timings. * * This function will attempt to detect if the given values correspond to a * valid GTF format. If so, then it will return true, and fmt will be filled * in with the found GTF timings. */ -bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync, - u32 polarities, bool interlaced, struct v4l2_fract aspect, - struct v4l2_dv_timings *fmt); +bool v4l2_detect_gtf(unsigned int frame_height, unsigned int hfreq, + unsigned int vsync, u32 polarities, bool interlaced, + struct v4l2_fract aspect, + const struct v4l2_dv_timings_cap *cap, + struct v4l2_dv_timings *fmt); /** * v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes @@ -251,4 +258,51 @@ void v4l2_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr); u16 v4l2_phys_addr_for_input(u16 phys_addr, u8 input); int v4l2_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); +/* Add support for exporting InfoFrames to debugfs */ + +/* + * HDMI InfoFrames start with a 3 byte header, then a checksum, + * followed by the actual IF payload. + * + * The payload length is limited to 30 bytes according to the HDMI spec, + * but since the length is encoded in 5 bits, it can be 31 bytes theoretically. + * So set the max length as 31 + 3 (header) + 1 (checksum) = 35. 
+ */ +#define V4L2_DEBUGFS_IF_MAX_LEN (35) + +#define V4L2_DEBUGFS_IF_AVI BIT(0) +#define V4L2_DEBUGFS_IF_AUDIO BIT(1) +#define V4L2_DEBUGFS_IF_SPD BIT(2) +#define V4L2_DEBUGFS_IF_HDMI BIT(3) + +typedef ssize_t (*v4l2_debugfs_if_read_t)(u32 type, void *priv, + struct file *filp, char __user *ubuf, + size_t count, loff_t *ppos); + +struct v4l2_debugfs_if { + struct dentry *if_dir; + void *priv; + + v4l2_debugfs_if_read_t if_read; +}; + +#ifdef CONFIG_DEBUG_FS +struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types, + void *priv, + v4l2_debugfs_if_read_t if_read); +void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes); +#else +static inline +struct v4l2_debugfs_if *v4l2_debugfs_if_alloc(struct dentry *root, u32 if_types, + void *priv, + v4l2_debugfs_if_read_t if_read) +{ + return NULL; +} + +static inline void v4l2_debugfs_if_free(struct v4l2_debugfs_if *infoframes) +{ +} +#endif + #endif diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 8daa0929865c..2f2200875b03 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -450,8 +450,9 @@ enum v4l2_subdev_pre_streamon_flags { * already started or stopped subdev. Also see call_s_stream wrapper in * v4l2-subdev.c. * - * New drivers should instead implement &v4l2_subdev_pad_ops.enable_streams - * and &v4l2_subdev_pad_ops.disable_streams operations, and use + * This callback is DEPRECATED. New drivers should instead implement + * &v4l2_subdev_pad_ops.enable_streams and + * &v4l2_subdev_pad_ops.disable_streams operations, and use * v4l2_subdev_s_stream_helper for the &v4l2_subdev_video_ops.s_stream * operation to support legacy users. * @@ -833,11 +834,19 @@ struct v4l2_subdev_state { * v4l2_subdev_init_finalize() at initialization time). Do not call * directly, use v4l2_subdev_enable_streams() instead. * + * Drivers that support only a single stream without setting the + * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask + * argument. + * * @disable_streams: Disable the streams defined in streams_mask on the given * source pad. Subdevs that implement this operation must use the active * state management provided by the subdev core (enabled through a call to * v4l2_subdev_init_finalize() at initialization time). Do not call * directly, use v4l2_subdev_disable_streams() instead. + * + * Drivers that support only a single stream without setting the + * V4L2_SUBDEV_CAP_STREAMS sub-device capability flag can ignore the mask + * argument. */ struct v4l2_subdev_pad_ops { int (*enum_mbus_code)(struct v4l2_subdev *sd, @@ -1676,6 +1685,8 @@ int v4l2_subdev_routing_validate(struct v4l2_subdev *sd, * function implements a best-effort compatibility by calling the .s_stream() * operation, limited to subdevs that have a single source pad. * + * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0). + * * Return: * * 0: Success * * -EALREADY: One of the streams in streams_mask is already enabled @@ -1706,6 +1717,8 @@ int v4l2_subdev_enable_streams(struct v4l2_subdev *sd, u32 pad, * function implements a best-effort compatibility by calling the .s_stream() * operation, limited to subdevs that have a single source pad. * + * Drivers that are not stream-aware shall set @streams_mask to BIT_ULL(0). 
+ *
  * Return:
  * * 0: Success
  * * -EALREADY: One of the streams in streams_mask is not enabled
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 77ee0c657e2c..404df8557f6a 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -219,7 +219,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
 int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
 		    int ref, bool terse);
 int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
-int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
 
 static inline void tcf_action_update_bstats(struct tc_action *a,
 					    struct sk_buff *skb)
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index bab1e3d7452a..6203bd8663b7 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -1,7 +1,7 @@
 /*
    BlueZ - Bluetooth protocol stack for Linux
    Copyright (C) 2000-2001 Qualcomm Incorporated
-   Copyright 2023 NXP
+   Copyright 2023-2024 NXP
 
    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
 
@@ -29,6 +29,7 @@
 #define HCI_MAX_ACL_SIZE	1024
 #define HCI_MAX_SCO_SIZE	255
 #define HCI_MAX_ISO_SIZE	251
+#define HCI_MAX_ISO_BIS		31
 #define HCI_MAX_EVENT_SIZE	260
 #define HCI_MAX_FRAME_SIZE	(HCI_MAX_ACL_SIZE + 4)
 
@@ -67,6 +68,7 @@
 #define HCI_I2C		8
 #define HCI_SMD		9
 #define HCI_VIRTIO	10
+#define HCI_IPC		11
 
 /* HCI device quirks */
 enum {
@@ -300,6 +302,20 @@ enum {
 	 */
 	HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT,
 
+	/*
+	 * When this quirk is set, the HCI_OP_LE_EXT_CREATE_CONN command is
+	 * disabled. This is required for the Actions Semiconductor ATS2851
+	 * based controllers, which erroneously claim to support it.
+	 */
+	HCI_QUIRK_BROKEN_EXT_CREATE_CONN,
+
+	/*
+	 * When this quirk is set, the command WRITE_AUTH_PAYLOAD_TIMEOUT is
+	 * skipped. This is required for the Actions Semiconductor ATS2851
+	 * based controllers, due to a race condition in the pairing process.
+	 */
+	HCI_QUIRK_BROKEN_WRITE_AUTH_PAYLOAD_TIMEOUT,
+
 	/* When this quirk is set, MSFT extension monitor tracking by
 	 * address filter is supported.
Since tracking quantity of each * pattern is limited, this feature supports tracking multiple @@ -683,6 +699,7 @@ enum { #define HCI_RSSI_INVALID 127 #define HCI_SYNC_HANDLE_INVALID 0xffff +#define HCI_SID_INVALID 0xff #define HCI_ROLE_MASTER 0x00 #define HCI_ROLE_SLAVE 0x01 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 88265d37aa72..ea798f07c5a2 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -668,6 +668,7 @@ struct hci_conn { __u8 adv_instance; __u16 handle; __u16 sync_handle; + __u8 sid; __u16 state; __u16 mtu; __u8 mode; @@ -710,6 +711,9 @@ struct hci_conn { __s8 tx_power; __s8 max_tx_power; struct bt_iso_qos iso_qos; + __u8 num_bis; + __u8 bis[HCI_MAX_ISO_BIS]; + unsigned long flags; enum conn_reasons conn_reason; @@ -945,8 +949,10 @@ enum { HCI_CONN_PER_ADV, HCI_CONN_BIG_CREATED, HCI_CONN_CREATE_CIS, + HCI_CONN_CREATE_BIG_SYNC, HCI_CONN_BIG_SYNC, HCI_CONN_BIG_SYNC_FAILED, + HCI_CONN_CREATE_PA_SYNC, HCI_CONN_PA_SYNC, HCI_CONN_PA_SYNC_FAILED, }; @@ -1099,6 +1105,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, return NULL; } +static inline struct hci_conn *hci_conn_hash_lookup_sid(struct hci_dev *hdev, + __u8 sid, + bdaddr_t *dst, + __u8 dst_type) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != ISO_LINK || bacmp(&c->dst, dst) || + c->dst_type != dst_type || c->sid != sid) + continue; + + rcu_read_unlock(); + return c; + } + + rcu_read_unlock(); + + return NULL; +} + static inline struct hci_conn * hci_conn_hash_lookup_per_adv_bis(struct hci_dev *hdev, bdaddr_t *ba, @@ -1255,7 +1285,17 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, rcu_read_lock(); list_for_each_entry_rcu(c, &h->list, list) { - if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK) + if (c->type != ISO_LINK) + continue; + + /* An ISO_LINK hcon with BDADDR_ANY as destination + * address is a Broadcast connection. A Broadcast + * slave connection is associated with a PA train, + * so the sync_handle can be used to differentiate + * from unicast. + */ + if (bacmp(&c->dst, BDADDR_ANY) && + c->sync_handle == HCI_SYNC_HANDLE_INVALID) continue; if (handle == c->iso_qos.bcast.big) { @@ -1270,6 +1310,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, } static inline struct hci_conn * +hci_conn_hash_lookup_big_sync_pend(struct hci_dev *hdev, + __u8 handle, __u8 num_bis) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != ISO_LINK) + continue; + + if (handle == c->iso_qos.bcast.big && num_bis == c->num_bis) { + rcu_read_unlock(); + return c; + } + } + + rcu_read_unlock(); + + return NULL; +} + +static inline struct hci_conn * hci_conn_hash_lookup_big_state(struct hci_dev *hdev, __u8 handle, __u16 state) { struct hci_conn_hash *h = &hdev->conn_hash; @@ -1328,6 +1392,13 @@ hci_conn_hash_lookup_pa_sync_handle(struct hci_dev *hdev, __u16 sync_handle) if (c->type != ISO_LINK) continue; + /* Ignore the listen hcon, we are looking + * for the child hcon that was created as + * a result of the PA sync established event. 
+ */ + if (c->state == BT_LISTEN) + continue; + if (c->sync_handle == sync_handle) { rcu_read_unlock(); return c; @@ -1445,6 +1516,8 @@ bool hci_setup_sync(struct hci_conn *conn, __u16 handle); void hci_sco_setup(struct hci_conn *conn, __u8 status); bool hci_iso_setup_path(struct hci_conn *conn); int hci_le_create_cis_pending(struct hci_dev *hdev); +int hci_pa_create_sync_pending(struct hci_dev *hdev); +int hci_le_big_create_sync_pending(struct hci_dev *hdev); int hci_conn_check_create_cis(struct hci_conn *conn); struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, @@ -1871,8 +1944,8 @@ void hci_conn_del_sysfs(struct hci_conn *conn); !test_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &(dev)->quirks)) /* Use ext create connection if command is supported */ -#define use_ext_conn(dev) ((dev)->commands[37] & 0x80) - +#define use_ext_conn(dev) (((dev)->commands[37] & 0x80) && \ + !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, &(dev)->quirks)) /* Extended advertising support */ #define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV)) @@ -1885,8 +1958,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn); * C24: Mandatory if the LE Controller supports Connection State and either * LE Feature (LL Privacy) or LE Feature (Extended Advertising) is supported */ -#define use_enhanced_conn_complete(dev) (ll_privacy_capable(dev) || \ - ext_adv_capable(dev)) +#define use_enhanced_conn_complete(dev) ((ll_privacy_capable(dev) || \ + ext_adv_capable(dev)) && \ + !test_bit(HCI_QUIRK_BROKEN_EXT_CREATE_CONN, \ + &(dev)->quirks)) /* Periodic advertising support */ #define per_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_PERIODIC_ADV)) diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index d382679efd2b..affac861efdc 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -878,6 +878,16 @@ struct mgmt_cp_mesh_send_cancel { } __packed; #define MGMT_MESH_SEND_CANCEL_SIZE 1 +#define MGMT_OP_HCI_CMD_SYNC 0x005B +struct mgmt_cp_hci_cmd_sync { + __le16 opcode; + __u8 event; + __u8 timeout; + __le16 params_len; + __u8 params[]; +} __packed; +#define MGMT_HCI_CMD_SYNC_SIZE 6 + #define MGMT_EV_CMD_COMPLETE 0x0001 struct mgmt_ev_cmd_complete { __le16 opcode; diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index f03040baaefd..c858270141bc 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h @@ -52,6 +52,9 @@ void napi_busy_loop_rcu(unsigned int napi_id, bool (*loop_end)(void *, unsigned long), void *loop_end_arg, bool prefer_busy_poll, u16 budget); +void napi_suspend_irqs(unsigned int napi_id); +void napi_resume_irqs(unsigned int napi_id); + #else /* CONFIG_NET_RX_BUSY_POLL */ static inline unsigned long net_busy_loop_on(void) { diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h index 5ee7b322e18b..a000dc45f966 100644 --- a/include/net/caif/cfsrvl.h +++ b/include/net/caif/cfsrvl.h @@ -40,7 +40,6 @@ void cfsrvl_init(struct cfsrvl *service, struct dev_info *dev_info, bool supports_flowctrl); bool cfsrvl_ready(struct cfsrvl *service, int *err); -u8 cfsrvl_getphyid(struct cflayer *layer); static inline void cfsrvl_get(struct cflayer *layr) { diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 941dc62f3027..27acf1292a5c 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -1460,7 +1460,6 @@ struct cfg80211_unsol_bcast_probe_resp { * @crypto: crypto settings * @privacy: the BSS uses privacy * @auth_type: Authentication type (algorithm) - * @smps_mode: SMPS mode * @inactivity_timeout: 
time in seconds to determine station's inactivity. * @p2p_ctwindow: P2P CT Window * @p2p_opp_ps: P2P opportunistic PS @@ -1498,7 +1497,6 @@ struct cfg80211_ap_settings { struct cfg80211_crypto_settings crypto; bool privacy; enum nl80211_auth_type auth_type; - enum nl80211_smps_mode smps_mode; int inactivity_timeout; u8 p2p_ctwindow; bool p2p_opp_ps; @@ -2269,6 +2267,7 @@ static inline int cfg80211_get_station(struct net_device *dev, * @MONITOR_FLAG_OTHER_BSS: disable BSSID filtering * @MONITOR_FLAG_COOK_FRAMES: report frames after processing * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address + * @MONITOR_FLAG_SKIP_TX: do not pass locally transmitted frames */ enum monitor_flags { MONITOR_FLAG_CHANGED = BIT(__NL80211_MNTR_FLAG_INVALID), @@ -2278,6 +2277,7 @@ enum monitor_flags { MONITOR_FLAG_OTHER_BSS = BIT(NL80211_MNTR_FLAG_OTHER_BSS), MONITOR_FLAG_COOK_FRAMES = BIT(NL80211_MNTR_FLAG_COOK_FRAMES), MONITOR_FLAG_ACTIVE = BIT(NL80211_MNTR_FLAG_ACTIVE), + MONITOR_FLAG_SKIP_TX = BIT(NL80211_MNTR_FLAG_SKIP_TX), }; /** @@ -4696,6 +4696,7 @@ struct cfg80211_ops { struct ieee80211_channel *chan); int (*set_monitor_channel)(struct wiphy *wiphy, + struct net_device *dev, struct cfg80211_chan_def *chandef); int (*scan)(struct wiphy *wiphy, @@ -5436,6 +5437,8 @@ struct wiphy_radio_freq_range { * @iface_combinations: Valid interface combinations array, should not * list single interface types. * @n_iface_combinations: number of entries in @iface_combinations array. + * + * @antenna_mask: bitmask of antennas connected to this radio. */ struct wiphy_radio { const struct wiphy_radio_freq_range *freq_range; @@ -5443,6 +5446,8 @@ struct wiphy_radio { const struct ieee80211_iface_combination *iface_combinations; int n_iface_combinations; + + u32 antenna_mask; }; #define CFG80211_HW_TIMESTAMP_ALL_PEERS 0xffff @@ -6267,6 +6272,7 @@ enum ieee80211_ap_reg_power { * entered. * @links.cac_time_ms: CAC time in ms * @valid_links: bitmap describing what elements of @links are valid + * @radio_mask: Bitmask of radios that this interface is allowed to operate on. 
*/ struct wireless_dev { struct wiphy *wiphy; @@ -6379,6 +6385,8 @@ struct wireless_dev { unsigned int cac_time_ms; } links[IEEE80211_MLD_MAX_NUM_LINKS]; u16 valid_links; + + u32 radio_mask; }; static inline const u8 *wdev_address(struct wireless_dev *wdev) @@ -6565,6 +6573,17 @@ bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio, const struct cfg80211_chan_def *chandef); /** + * cfg80211_wdev_channel_allowed - Check if the wdev may use the channel + * + * @wdev: the wireless device + * @chan: channel to check + * + * Return: whether or not the wdev may use the channel + */ +bool cfg80211_wdev_channel_allowed(struct wireless_dev *wdev, + struct ieee80211_channel *chan); + +/** * ieee80211_get_response_rate - get basic rate for a given rate * * @sband: the band to look for rates in diff --git a/include/net/checksum.h b/include/net/checksum.h index 1338cb92c8e7..243f972267b8 100644 --- a/include/net/checksum.h +++ b/include/net/checksum.h @@ -151,6 +151,12 @@ static inline void csum_replace(__wsum *csum, __wsum old, __wsum new) *csum = csum_add(csum_sub(*csum, old), new); } +static inline unsigned short csum_from32to16(unsigned int sum) +{ + sum += (sum >> 16) | (sum << 16); + return (unsigned short)(sum >> 16); +} + struct sk_buff; void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb, __be32 from, __be32 to, bool pseudohdr); diff --git a/include/net/devlink.h b/include/net/devlink.h index db5eff6cb60f..fbb9a2668e24 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -1779,12 +1779,6 @@ int devl_resource_register(struct devlink *devlink, u64 resource_id, u64 parent_resource_id, const struct devlink_resource_size_params *size_params); -int devlink_resource_register(struct devlink *devlink, - const char *resource_name, - u64 resource_size, - u64 resource_id, - u64 parent_resource_id, - const struct devlink_resource_size_params *size_params); void devl_resources_unregister(struct devlink *devlink); void devlink_resources_unregister(struct devlink *devlink); int devl_resource_size_get(struct devlink *devlink, @@ -1797,15 +1791,8 @@ void devl_resource_occ_get_register(struct devlink *devlink, u64 resource_id, devlink_resource_occ_get_t *occ_get, void *occ_get_priv); -void devlink_resource_occ_get_register(struct devlink *devlink, - u64 resource_id, - devlink_resource_occ_get_t *occ_get, - void *occ_get_priv); void devl_resource_occ_get_unregister(struct devlink *devlink, u64 resource_id); - -void devlink_resource_occ_get_unregister(struct devlink *devlink, - u64 resource_id); int devl_params_register(struct devlink *devlink, const struct devlink_param *params, size_t params_count); diff --git a/include/net/dropreason-core.h b/include/net/dropreason-core.h index 4748680e8c88..6c5a1ea209a2 100644 --- a/include/net/dropreason-core.h +++ b/include/net/dropreason-core.h @@ -76,6 +76,10 @@ FN(INVALID_PROTO) \ FN(IP_INADDRERRORS) \ FN(IP_INNOROUTES) \ + FN(IP_LOCAL_SOURCE) \ + FN(IP_INVALID_SOURCE) \ + FN(IP_LOCALNET) \ + FN(IP_INVALID_DEST) \ FN(PKT_TOO_BIG) \ FN(DUP_FRAG) \ FN(FRAG_REASM_TIMEOUT) \ @@ -92,6 +96,15 @@ FN(PACKET_SOCK_ERROR) \ FN(TC_CHAIN_NOTFOUND) \ FN(TC_RECLASSIFY_LOOP) \ + FN(VXLAN_INVALID_HDR) \ + FN(VXLAN_VNI_NOT_FOUND) \ + FN(MAC_INVALID_SOURCE) \ + FN(VXLAN_ENTRY_EXISTS) \ + FN(VXLAN_NO_REMOTE) \ + FN(IP_TUNNEL_ECN) \ + FN(TUNNEL_TXINFO) \ + FN(LOCAL_MAC) \ + FN(ARP_PVLAN_DISABLE) \ FNe(MAX) /** @@ -365,6 +378,21 @@ enum skb_drop_reason { * IPSTATS_MIB_INADDRERRORS */ SKB_DROP_REASON_IP_INNOROUTES, + /** @SKB_DROP_REASON_IP_LOCAL_SOURCE: 
the source ip is local */ + SKB_DROP_REASON_IP_LOCAL_SOURCE, + /** + * @SKB_DROP_REASON_IP_INVALID_SOURCE: the source ip is invalid: + * 1) source ip is multicast or limited broadcast + * 2) source ip is zero and not IGMP + */ + SKB_DROP_REASON_IP_INVALID_SOURCE, + /** @SKB_DROP_REASON_IP_LOCALNET: source or dest ip is local net */ + SKB_DROP_REASON_IP_LOCALNET, + /** + * @SKB_DROP_REASON_IP_INVALID_DEST: the dest ip is invalid: + * 1) dest ip is 0 + */ + SKB_DROP_REASON_IP_INVALID_DEST, /** * @SKB_DROP_REASON_PKT_TOO_BIG: packet size is too big (maybe exceed the * MTU) @@ -419,6 +447,44 @@ enum skb_drop_reason { */ SKB_DROP_REASON_TC_RECLASSIFY_LOOP, /** + * @SKB_DROP_REASON_VXLAN_INVALID_HDR: VXLAN header is invalid. E.g.: + * 1) reserved fields are not zero + * 2) "I" flag is not set + */ + SKB_DROP_REASON_VXLAN_INVALID_HDR, + /** @SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND: no VXLAN device found for VNI */ + SKB_DROP_REASON_VXLAN_VNI_NOT_FOUND, + /** @SKB_DROP_REASON_MAC_INVALID_SOURCE: source mac is invalid */ + SKB_DROP_REASON_MAC_INVALID_SOURCE, + /** + * @SKB_DROP_REASON_VXLAN_ENTRY_EXISTS: trying to migrate a static + * entry or an entry pointing to a nexthop. + */ + SKB_DROP_REASON_VXLAN_ENTRY_EXISTS, + /** @SKB_DROP_REASON_VXLAN_NO_REMOTE: no remote found for xmit */ + SKB_DROP_REASON_VXLAN_NO_REMOTE, + /** + * @SKB_DROP_REASON_IP_TUNNEL_ECN: skb is dropped according to + * RFC 6040 4.2, see __INET_ECN_decapsulate() for detail. + */ + SKB_DROP_REASON_IP_TUNNEL_ECN, + /** + * @SKB_DROP_REASON_TUNNEL_TXINFO: packet without necessary metadata + * reached a device which is in "external" mode. + */ + SKB_DROP_REASON_TUNNEL_TXINFO, + /** + * @SKB_DROP_REASON_LOCAL_MAC: the source MAC address is equal to + * the MAC address of the local netdev. + */ + SKB_DROP_REASON_LOCAL_MAC, + /** + * @SKB_DROP_REASON_ARP_PVLAN_DISABLE: packet which is not IP is + * forwarded to the in_dev, and the proxy_arp_pvlan is not + * enabled. 
+	 */
+	SKB_DROP_REASON_ARP_PVLAN_DISABLE,
+	/**
 	 * @SKB_DROP_REASON_MAX: the maximum of core drop reasons, which
 	 * shouldn't be used as a real 'reason' - only for tracing code gen
 	 */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index d7a6c2930277..72ae65e7246a 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -885,21 +885,6 @@ struct dsa_switch_ops {
 	 */
 	void	(*phylink_get_caps)(struct dsa_switch *ds, int port,
 				    struct phylink_config *config);
-	struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
-						      int port,
-						      phy_interface_t iface);
-	void	(*phylink_mac_config)(struct dsa_switch *ds, int port,
-				      unsigned int mode,
-				      const struct phylink_link_state *state);
-	void	(*phylink_mac_link_down)(struct dsa_switch *ds, int port,
-					 unsigned int mode,
-					 phy_interface_t interface);
-	void	(*phylink_mac_link_up)(struct dsa_switch *ds, int port,
-				       unsigned int mode,
-				       phy_interface_t interface,
-				       struct phy_device *phydev,
-				       int speed, int duplex,
-				       bool tx_pause, bool rx_pause);
 	void	(*phylink_fixed_state)(struct dsa_switch *ds, int port,
 				       struct phylink_link_state *state);
 	/*
diff --git a/include/net/eee.h b/include/net/eee.h
index 84837aba3cd9..cfab1b8bc46a 100644
--- a/include/net/eee.h
+++ b/include/net/eee.h
@@ -13,10 +13,7 @@ struct eee_config {
 static inline bool eeecfg_mac_can_tx_lpi(const struct eee_config *eeecfg)
 {
 	/* eee_enabled is the master on/off */
-	if (!eeecfg->eee_enabled || !eeecfg->tx_lpi_enabled)
-		return false;
-
-	return true;
+	return eeecfg->eee_enabled && eeecfg->tx_lpi_enabled;
 }
 
 static inline void eeecfg_to_eee(struct ethtool_keee *eee,
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
index 6d59221ff05a..48aad6128fea 100644
--- a/include/net/fib_notifier.h
+++ b/include/net/fib_notifier.h
@@ -28,7 +28,7 @@ enum fib_event_type {
 struct fib_notifier_ops {
 	int family;
 	struct list_head list;
-	unsigned int (*fib_seq_read)(struct net *net);
+	unsigned int (*fib_seq_read)(const struct net *net);
 	int (*fib_dump)(struct net *net, struct notifier_block *nb,
 			struct netlink_ext_ack *extack);
 	struct module *owner;
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index d17855c52ef9..04383d90a1e3 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -176,7 +176,7 @@ int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table);
 bool fib_rule_matchall(const struct fib_rule *rule);
 int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
 		   struct netlink_ext_ack *extack);
-unsigned int fib_rules_seq_read(struct net *net, int family);
+unsigned int fib_rules_seq_read(const struct net *net, int family);
 int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
 		   struct netlink_ext_ack *extack);
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 292cd8f4b762..596ab9791e4d 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -685,6 +685,7 @@ struct flow_cls_common_offload {
 	u32 chain_index;
 	__be16 protocol;
 	u32 prio;
+	bool skip_sw;
 	struct netlink_ext_ack *extack;
 };
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index c1d91f1d20f6..d096cc6352de 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -124,7 +124,8 @@ struct genl_family {
  * @genlhdr: generic netlink message header
  * @attrs: netlink attributes
  * @_net: network namespace
- * @user_ptr: user pointers
+ * @ctx: storage space for use by the family
+ * @user_ptr: user pointers (deprecated, use ctx instead)
  * @extack: extended ACK report struct
  */
struct genl_info { @@ -135,7 +136,10 @@ struct genl_info { struct genlmsghdr * genlhdr; struct nlattr ** attrs; possible_net_t _net; - void * user_ptr[2]; + union { + u8 ctx[NETLINK_CTX_SIZE]; + void * user_ptr[2]; + }; struct netlink_ext_ack *extack; }; diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index c0deaafebfdc..3c82fad904d4 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h @@ -197,12 +197,12 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) struct inet_connection_sock *icsk = inet_csk(sk); if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0) { - icsk->icsk_pending = 0; + smp_store_release(&icsk->icsk_pending, 0); #ifdef INET_CSK_CLEAR_TIMERS sk_stop_timer(sk, &icsk->icsk_retransmit_timer); #endif } else if (what == ICSK_TIME_DACK) { - icsk->icsk_ack.pending = 0; + smp_store_release(&icsk->icsk_ack.pending, 0); icsk->icsk_ack.retry = 0; #ifdef INET_CSK_CLEAR_TIMERS sk_stop_timer(sk, &icsk->icsk_delack_timer); @@ -229,11 +229,12 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what, if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 || what == ICSK_TIME_LOSS_PROBE || what == ICSK_TIME_REO_TIMEOUT) { - icsk->icsk_pending = what; + smp_store_release(&icsk->icsk_pending, what); icsk->icsk_timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); } else if (what == ICSK_TIME_DACK) { - icsk->icsk_ack.pending |= ICSK_ACK_TIMER; + smp_store_release(&icsk->icsk_ack.pending, + icsk->icsk_ack.pending | ICSK_ACK_TIMER); icsk->icsk_ack.timeout = jiffies + when; sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); } else { diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 394c3b66065e..56d8bc5593d3 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -174,6 +174,7 @@ struct inet_cork { __s16 tos; char priority; __u16 gso_size; + u32 ts_opt_id; u64 transmit_time; u32 mark; }; @@ -241,7 +242,8 @@ struct inet_sock { struct inet_cork_full cork; }; -#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ +#define IPCORK_OPT 1 /* ip-options has been held in ipcork.opt */ +#define IPCORK_TS_OPT_ID 2 /* ts_opt_id field is valid, overriding sk_tskey */ enum { INET_FLAGS_PKTINFO = 0, @@ -319,8 +321,10 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet) static inline struct sock *sk_to_full_sk(struct sock *sk) { #ifdef CONFIG_INET - if (sk && sk->sk_state == TCP_NEW_SYN_RECV) + if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV) sk = inet_reqsk(sk)->rsk_listener; + if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT) + sk = NULL; #endif return sk; } @@ -329,8 +333,10 @@ static inline struct sock *sk_to_full_sk(struct sock *sk) static inline const struct sock *sk_const_to_full_sk(const struct sock *sk) { #ifdef CONFIG_INET - if (sk && sk->sk_state == TCP_NEW_SYN_RECV) + if (sk && READ_ONCE(sk->sk_state) == TCP_NEW_SYN_RECV) sk = ((const struct request_sock *)sk)->rsk_listener; + if (sk && READ_ONCE(sk->sk_state) == TCP_TIME_WAIT) + sk = NULL; #endif return sk; } diff --git a/include/net/ip.h b/include/net/ip.h index d92d3bc3ec0e..0e548c1f2a0e 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -288,7 +288,8 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg) return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? 
FLOWI_FLAG_ANYSRC : 0; } -void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, +void ip_send_unicast_reply(struct sock *sk, const struct sock *orig_sk, + struct sk_buff *skb, const struct ip_options *sopt, __be32 daddr, __be32 saddr, const struct ip_reply_arg *arg, @@ -424,6 +425,11 @@ int ip_decrease_ttl(struct iphdr *iph) return --iph->ttl; } +static inline dscp_t ip4h_dscp(const struct iphdr *ip4h) +{ + return inet_dsfield_to_dscp(ip4h->tos); +} + static inline int ip_mtu_locked(const struct dst_entry *dst) { const struct rtable *rt = dst_rtable(dst); @@ -684,6 +690,11 @@ static inline unsigned int ipv4_addr_hash(__be32 ip) return (__force unsigned int) ip; } +static inline u32 __ipv4_addr_hash(const __be32 ip, const u32 initval) +{ + return jhash_1word((__force u32)ip, initval); +} + static inline u32 ipv4_portaddr_hash(const struct net *net, __be32 saddr, unsigned int port) diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index 6cb867ce4878..7c87873ae211 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h @@ -394,7 +394,7 @@ struct fib6_table { struct fib6_node tb6_root; struct inet_peer_base tb6_peers; unsigned int flags; - unsigned int fib_seq; + unsigned int fib_seq; /* writes protected by rtnl_mutex */ struct hlist_head tb6_gc_hlist; /* GC candidates */ #define RT6_TABLE_HAS_DFLT_ROUTER BIT(0) }; @@ -563,7 +563,7 @@ int call_fib6_notifiers(struct net *net, enum fib_event_type event_type, int __net_init fib6_notifier_init(struct net *net); void __net_exit fib6_notifier_exit(struct net *net); -unsigned int fib6_tables_seq_read(struct net *net); +unsigned int fib6_tables_seq_read(const struct net *net); int fib6_tables_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); @@ -632,7 +632,7 @@ void fib6_rules_cleanup(void); bool fib6_rule_default(const struct fib_rule *rule); int fib6_rules_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); -unsigned int fib6_rules_seq_read(struct net *net); +unsigned int fib6_rules_seq_read(const struct net *net); static inline bool fib6_rules_early_flow_dissect(struct net *net, struct sk_buff *skb, @@ -676,7 +676,7 @@ static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb, { return 0; } -static inline unsigned int fib6_rules_seq_read(struct net *net) +static inline unsigned int fib6_rules_seq_read(const struct net *net) { return 0; } diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 967e4dc555fa..a113c11ab56b 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -347,7 +347,7 @@ static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb, return 0; } -static inline unsigned int fib4_rules_seq_read(struct net *net) +static inline unsigned int fib4_rules_seq_read(const struct net *net) { return 0; } @@ -411,7 +411,7 @@ static inline bool fib4_has_custom_rules(const struct net *net) bool fib4_rule_default(const struct fib_rule *rule); int fib4_rules_dump(struct net *net, struct notifier_block *nb, struct netlink_ext_ack *extack); -unsigned int fib4_rules_seq_read(struct net *net); +unsigned int fib4_rules_seq_read(const struct net *net); static inline bool fib4_rules_early_flow_dissect(struct net *net, struct sk_buff *skb, @@ -449,8 +449,21 @@ int fib_gw_from_via(struct fib_config *cfg, struct nlattr *nla, __be32 fib_compute_spec_dst(struct sk_buff *skb); bool fib_info_nh_uses_dev(struct fib_info *fi, const struct net_device *dev); int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, - 
u8 tos, int oif, struct net_device *dev, + dscp_t dscp, int oif, struct net_device *dev, struct in_device *idev, u32 *itag); + +static inline enum skb_drop_reason +fib_validate_source_reason(struct sk_buff *skb, __be32 src, __be32 dst, + dscp_t dscp, int oif, struct net_device *dev, + struct in_device *idev, u32 *itag) +{ + int err = fib_validate_source(skb, src, dst, dscp, oif, dev, idev, + itag); + if (err < 0) + return -err; + return SKB_NOT_DROPPED_YET; +} + #ifdef CONFIG_IP_ROUTE_CLASSID static inline int fib_num_tclassid_users(struct net *net) { diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 6a070478254d..1aa31bdb2b31 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h @@ -439,7 +439,8 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op, int ip_tunnel_encap_setup(struct ip_tunnel *t, struct ip_tunnel_encap *ipencap); -static inline bool pskb_inet_may_pull(struct sk_buff *skb) +static inline enum skb_drop_reason +pskb_inet_may_pull_reason(struct sk_buff *skb) { int nhlen; @@ -456,16 +457,22 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb) nhlen = 0; } - return pskb_network_may_pull(skb, nhlen); + return pskb_network_may_pull_reason(skb, nhlen); +} + +static inline bool pskb_inet_may_pull(struct sk_buff *skb) +{ + return pskb_inet_may_pull_reason(skb) == SKB_NOT_DROPPED_YET; } /* Variant of pskb_inet_may_pull(). */ -static inline bool skb_vlan_inet_prepare(struct sk_buff *skb, - bool inner_proto_inherit) +static inline enum skb_drop_reason +skb_vlan_inet_prepare(struct sk_buff *skb, bool inner_proto_inherit) { int nhlen = 0, maclen = inner_proto_inherit ? 0 : ETH_HLEN; __be16 type = skb->protocol; + enum skb_drop_reason reason; /* Essentially this is skb_protocol(skb, true) * And we get MAC len. @@ -486,11 +493,13 @@ static inline bool skb_vlan_inet_prepare(struct sk_buff *skb, /* For ETH_P_IPV6/ETH_P_IP we make sure to pull * a base network header in skb->head. */ - if (!pskb_may_pull(skb, maclen + nhlen)) - return false; + reason = pskb_may_pull_reason(skb, maclen + nhlen); + if (reason) + return reason; skb_set_network_header(skb, maclen); - return true; + + return SKB_NOT_DROPPED_YET; } static inline int ip_encap_hlen(struct ip_tunnel_encap *e) diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h index 7af1082ea9a0..b80e474cb0aa 100644 --- a/include/net/iw_handler.h +++ b/include/net/iw_handler.h @@ -279,8 +279,6 @@ #define IW_DESCR_FLAG_RESTRICT 0x0004 /* GET : request is ROOT only */ /* SET : Omit payload from generated iwevent */ #define IW_DESCR_FLAG_NOMAX 0x0008 /* GET : no limit on request size */ -/* Driver level flags */ -#define IW_DESCR_FLAG_WAIT 0x0100 /* Wait for driver event */ /****************************** TYPES ******************************/ @@ -373,11 +371,10 @@ struct iw_handler_def { */ struct iw_ioctl_description { __u8 header_type; /* NULL, iw_point or other */ - __u8 token_type; /* Future */ + __u8 flags; /* Special handling of the request */ __u16 token_size; /* Granularity of payload */ __u16 min_tokens; /* Min acceptable token number */ __u16 max_tokens; /* Max acceptable token number */ - __u32 flags; /* Special handling of the request */ }; /* Need to think of short header translation table. Later. 
*/ @@ -404,26 +401,6 @@ struct iw_spy_data { u_char spy_thr_under[IW_MAX_SPY]; }; -/* --------------------- DEVICE WIRELESS DATA --------------------- */ -/* - * This is all the wireless data specific to a device instance that - * is managed by the core of Wireless Extensions or the 802.11 layer. - * We only keep pointer to those structures, so that a driver is free - * to share them between instances. - * This structure should be initialised before registering the device. - * Access to this data follow the same rules as any other struct net_device - * data (i.e. valid as long as struct net_device exist, same locking rules). - */ -/* Forward declaration */ -struct libipw_device; -/* The struct */ -struct iw_public_data { - /* Driver enhanced spy support */ - struct iw_spy_data * spy_data; - /* Legacy structure managed by the ipw2x00-specific IEEE 802.11 layer */ - struct libipw_device * libipw; -}; - /**************************** PROTOTYPES ****************************/ /* * Functions part of the Wireless Extensions (defined in net/wireless/wext-core.c). @@ -443,22 +420,6 @@ static inline void wireless_nlevent_flush(void) {} /* We may need a function to send a stream of events to user space. * More on that later... */ -/* Standard handler for SIOCSIWSPY */ -int iw_handler_set_spy(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *wrqu, char *extra); -/* Standard handler for SIOCGIWSPY */ -int iw_handler_get_spy(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *wrqu, char *extra); -/* Standard handler for SIOCSIWTHRSPY */ -int iw_handler_set_thrspy(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *wrqu, char *extra); -/* Standard handler for SIOCGIWTHRSPY */ -int iw_handler_get_thrspy(struct net_device *dev, struct iw_request_info *info, - union iwreq_data *wrqu, char *extra); -/* Driver call to update spy records */ -void wireless_spy_update(struct net_device *dev, unsigned char *address, - struct iw_quality *wstats); - /************************* INLINE FUNCTIONS *************************/ /* * Function that are so simple that it's more efficient inlining them diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 031c661aa14d..2d6141f28b53 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h @@ -78,7 +78,7 @@ static inline int l3mdev_master_ifindex_by_index(struct net *net, int ifindex) struct net_device *dev; int rc = 0; - if (likely(ifindex)) { + if (ifindex) { rcu_read_lock(); dev = dev_get_by_index_rcu(net, ifindex); diff --git a/include/net/lib80211.h b/include/net/lib80211.h deleted file mode 100644 index fd0f15d87d80..000000000000 --- a/include/net/lib80211.h +++ /dev/null @@ -1,122 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * lib80211.h -- common bits for IEEE802.11 wireless drivers - * - * Copyright (c) 2008, John W. Linville <linville@tuxdriver.com> - * - * Some bits copied from old ieee80211 component, w/ original copyright - * notices below: - * - * Original code based on Host AP (software wireless LAN access point) driver - * for Intersil Prism2/2.5/3. 
- * - * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen - * <j@w1.fi> - * Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi> - * - * Adaption to a generic IEEE 802.11 stack by James Ketrenos - * <jketreno@linux.intel.com> - * - * Copyright (c) 2004, Intel Corporation - * - */ - -#ifndef LIB80211_H -#define LIB80211_H - -#include <linux/types.h> -#include <linux/list.h> -#include <linux/atomic.h> -#include <linux/if.h> -#include <linux/skbuff.h> -#include <linux/ieee80211.h> -#include <linux/timer.h> -#include <linux/seq_file.h> - -#define NUM_WEP_KEYS 4 - -enum { - IEEE80211_CRYPTO_TKIP_COUNTERMEASURES = (1 << 0), -}; - -struct module; - -struct lib80211_crypto_ops { - const char *name; - struct list_head list; - - /* init new crypto context (e.g., allocate private data space, - * select IV, etc.); returns NULL on failure or pointer to allocated - * private data on success */ - void *(*init) (int keyidx); - - /* deinitialize crypto context and free allocated private data */ - void (*deinit) (void *priv); - - /* encrypt/decrypt return < 0 on error or >= 0 on success. The return - * value from decrypt_mpdu is passed as the keyidx value for - * decrypt_msdu. skb must have enough head and tail room for the - * encryption; if not, error will be returned; these functions are - * called for all MPDUs (i.e., fragments). - */ - int (*encrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv); - int (*decrypt_mpdu) (struct sk_buff * skb, int hdr_len, void *priv); - - /* These functions are called for full MSDUs, i.e. full frames. - * These can be NULL if full MSDU operations are not needed. */ - int (*encrypt_msdu) (struct sk_buff * skb, int hdr_len, void *priv); - int (*decrypt_msdu) (struct sk_buff * skb, int keyidx, int hdr_len, - void *priv); - - int (*set_key) (void *key, int len, u8 * seq, void *priv); - int (*get_key) (void *key, int len, u8 * seq, void *priv); - - /* procfs handler for printing out key information and possible - * statistics */ - void (*print_stats) (struct seq_file *m, void *priv); - - /* Crypto specific flag get/set for configuration settings */ - unsigned long (*get_flags) (void *priv); - unsigned long (*set_flags) (unsigned long flags, void *priv); - - /* maximum number of bytes added by encryption; encrypt buf is - * allocated with extra_prefix_len bytes, copy of in_buf, and - * extra_postfix_len; encrypt need not use all this space, but - * the result must start at the beginning of the buffer and correct - * length must be returned */ - int extra_mpdu_prefix_len, extra_mpdu_postfix_len; - int extra_msdu_prefix_len, extra_msdu_postfix_len; - - struct module *owner; -}; - -struct lib80211_crypt_data { - struct list_head list; /* delayed deletion list */ - const struct lib80211_crypto_ops *ops; - void *priv; - atomic_t refcnt; -}; - -struct lib80211_crypt_info { - char *name; - /* Most clients will already have a lock, - so just point to that. 
*/ - spinlock_t *lock; - - struct lib80211_crypt_data *crypt[NUM_WEP_KEYS]; - int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */ - struct list_head crypt_deinit_list; - struct timer_list crypt_deinit_timer; - int crypt_quiesced; -}; - -int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name, - spinlock_t *lock); -void lib80211_crypt_info_free(struct lib80211_crypt_info *info); -int lib80211_register_crypto_ops(const struct lib80211_crypto_ops *ops); -int lib80211_unregister_crypto_ops(const struct lib80211_crypto_ops *ops); -const struct lib80211_crypto_ops *lib80211_get_crypto_ops(const char *name); -void lib80211_crypt_delayed_deinit(struct lib80211_crypt_info *info, - struct lib80211_crypt_data **crypt); - -#endif /* LIB80211_H */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 333e0fae6796..a97c9f85ae9a 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -213,7 +213,7 @@ struct ieee80211_low_level_stats { * @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel, * this is used only with channel switching with CSA - * @IEEE80211_CHANCTX_CHANGE_MIN_WIDTH: The min required channel width changed + * @IEEE80211_CHANCTX_CHANGE_MIN_DEF: The min chandef changed * @IEEE80211_CHANCTX_CHANGE_AP: The AP channel definition changed, so (wider * bandwidth) OFDMA settings need to be changed * @IEEE80211_CHANCTX_CHANGE_PUNCTURING: The punctured channel(s) bitmap @@ -224,7 +224,7 @@ enum ieee80211_chanctx_change { IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1), IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2), IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3), - IEEE80211_CHANCTX_CHANGE_MIN_WIDTH = BIT(4), + IEEE80211_CHANCTX_CHANGE_MIN_DEF = BIT(4), IEEE80211_CHANCTX_CHANGE_AP = BIT(5), IEEE80211_CHANCTX_CHANGE_PUNCTURING = BIT(6), }; @@ -740,6 +740,19 @@ struct ieee80211_parsed_tpe { * @eht_80mhz_full_bw_ul_mumimo: in AP-mode, does this BSS support the * reception of an EHT TB PPDU on an RU that spans the entire PPDU * bandwidth + * @bss_param_ch_cnt: in BSS-mode, the BSS params change count. This + * information is the latest known value. It can come from this link's + * beacon or from a beacon sent by another link. + * @bss_param_ch_cnt_link_id: in BSS-mode, the link_id to which the beacon + * that updated &bss_param_ch_cnt belongs. E.g. if link 1 doesn't hear + * its beacons, and link 2 sent a beacon with an RNR element that updated + * link 1's BSS params change count, then, link 1's + * bss_param_ch_cnt_link_id will be 2. That means that link 1 knows that + * link 2 was the link that updated its bss_param_ch_cnt value. + * In case link 1 hears its beacon again, bss_param_ch_cnt_link_id will + * be updated to 1, even if bss_param_ch_cnt didn't change. This allows + * the link to know that it heard the latest value from its own beacon + * (as opposed to hearing its value from another link's beacon). 
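A hedged sketch of how a driver might consume the two new counters documented above; the drv_link_info_changed wiring is hypothetical and not part of this patch, only the two bss_conf fields come from the hunk below:

#include <net/mac80211.h>

/* Hypothetical driver callback, sketch only: mac80211 passes the
 * per-link conf here; the two new fields record which link's beacon
 * carried the latest BSS params change count.
 */
static void drv_link_info_changed(struct ieee80211_hw *hw,
                                  struct ieee80211_vif *vif,
                                  struct ieee80211_bss_conf *link_conf,
                                  u64 changed)
{
        pr_debug("BSS params change count %u (heard via link %u)\n",
                 link_conf->bss_param_ch_cnt,
                 link_conf->bss_param_ch_cnt_link_id);
}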
*/ struct ieee80211_bss_conf { struct ieee80211_vif *vif; @@ -834,6 +847,8 @@ struct ieee80211_bss_conf { bool eht_su_beamformee; bool eht_mu_beamformer; bool eht_80mhz_full_bw_ul_mumimo; + u8 bss_param_ch_cnt; + u8 bss_param_ch_cnt_link_id; }; /** @@ -1448,8 +1463,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) * @RX_FLAG_AMPDU_IS_LAST: this subframe is the last subframe of the A-MPDU * @RX_FLAG_AMPDU_DELIM_CRC_ERROR: A delimiter CRC error has been detected * on this subframe - * @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC - * is stored in the @ampdu_delimiter_crc field) * @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was * done by the hardware * @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without @@ -1521,7 +1534,7 @@ enum mac80211_rx_flags { RX_FLAG_AMPDU_LAST_KNOWN = BIT(12), RX_FLAG_AMPDU_IS_LAST = BIT(13), RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14), - RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15), + /* one free bit at 15 */ RX_FLAG_MACTIME = BIT(16) | BIT(17), RX_FLAG_MACTIME_PLCP_START = 1 << 16, RX_FLAG_MACTIME_START = 2 << 16, @@ -1618,7 +1631,6 @@ enum mac80211_rx_encoding { * @rx_flags: internal RX flags for mac80211 * @ampdu_reference: A-MPDU reference number, must be a different value for * each A-MPDU but the same for each subframe within one A-MPDU - * @ampdu_delimiter_crc: A-MPDU delimiter CRC * @zero_length_psdu_type: radiotap type of the 0-length PSDU * @link_valid: if the link which is identified by @link_id is valid. This flag * is set only when connection is MLO. @@ -1656,7 +1668,6 @@ struct ieee80211_rx_status { s8 signal; u8 chains; s8 chain_signal[IEEE80211_MAX_CHAINS]; - u8 ampdu_delimiter_crc; u8 zero_length_psdu_type; u8 link_valid:1, link_id:4; }; @@ -2683,6 +2694,11 @@ struct ieee80211_txq { * a virtual monitor interface when monitor interfaces are the only * active interfaces. * + * @IEEE80211_HW_NO_VIRTUAL_MONITOR: The driver would like to be informed + * of any monitor interface, as well as their configured channel. + * This is useful for supporting multiple monitor interfaces on different + * channels. + * * @IEEE80211_HW_NO_AUTO_VIF: The driver would like for no wlanX to * be created. It is expected user-space will create vifs as * desired (and thus have them named as desired). @@ -2842,6 +2858,7 @@ enum ieee80211_hw_flags { IEEE80211_HW_SUPPORTS_DYNAMIC_PS, IEEE80211_HW_MFP_CAPABLE, IEEE80211_HW_WANT_MONITOR_VIF, + IEEE80211_HW_NO_VIRTUAL_MONITOR, IEEE80211_HW_NO_AUTO_VIF, IEEE80211_HW_SW_CRYPTO_CONTROL, IEEE80211_HW_SUPPORT_FAST_XMIT, @@ -4075,8 +4092,8 @@ struct ieee80211_prep_tx_info { * in @sta_state. * The callback can sleep. * - * @sta_rc_update: Notifies the driver of changes to the bitrates that can be - * used to transmit to the station. The changes are advertised with bits + * @link_sta_rc_update: Notifies the driver of changes to the bitrates that can + * be used to transmit to the station. The changes are advertised with bits * from &enum ieee80211_rate_control_changed and the values are reflected * in the station data. This callback should only be used when the driver * uses hardware rate control (%IEEE80211_HW_HAS_RATE_CONTROL) since @@ -4444,6 +4461,12 @@ struct ieee80211_prep_tx_info { * if the requested TID-To-Link mapping can be accepted or not. * If it's not accepted the driver may suggest a preferred mapping and * modify @ttlm parameter with the suggested TID-to-Link mapping. + * @prep_add_interface: prepare for interface addition. 
This can be used by + * drivers to prepare for the addition of a new interface, e.g., allocate + * the needed resources etc. This callback doesn't guarantee that an + * interface with the specified type would be added, and thus drivers that + * implement this callback need to handle such cases. The type is the full + * &enum nl80211_iftype. */ struct ieee80211_ops { void (*tx)(struct ieee80211_hw *hw, @@ -4560,10 +4583,10 @@ struct ieee80211_ops { void (*sta_pre_rcu_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); - void (*sta_rc_update)(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, - u32 changed); + void (*link_sta_rc_update)(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta, + u32 changed); void (*sta_rate_tbl_update)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -4828,6 +4851,8 @@ struct ieee80211_ops { enum ieee80211_neg_ttlm_res (*can_neg_ttlm)(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_neg_ttlm *ttlm); + void (*prep_add_interface)(struct ieee80211_hw *hw, + enum nl80211_iftype type); }; /** @@ -7647,12 +7672,12 @@ static inline bool ieee80211_is_tx_data(struct sk_buff *skb) * * - change_vif_links(0x11) * - unassign_vif_chanctx(link_id=0) + * - assign_vif_chanctx(link_id=4) * - change_sta_links(0x11) for each affected STA (the AP) * (TDLS connections on now inactive links should be torn down) * - remove group keys on the old link (link_id 0) * - add new group keys (GTK/IGTK/BIGTK) on the new link (link_id 4) * - change_sta_links(0x10) for each affected STA (the AP) - * - assign_vif_chanctx(link_id=4) * - change_vif_links(0x10) * * Return: 0 on success. An error code otherwise. @@ -7681,6 +7706,33 @@ void ieee80211_set_active_links_async(struct ieee80211_vif *vif, */ void ieee80211_send_teardown_neg_ttlm(struct ieee80211_vif *vif); +/** + * ieee80211_chan_width_to_rx_bw - convert channel width to STA RX bandwidth + * @width: the channel width value to convert + * Return: the STA RX bandwidth value for the channel width + */ +static inline enum ieee80211_sta_rx_bandwidth +ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width) +{ + switch (width) { + default: + WARN_ON_ONCE(1); + fallthrough; + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + return IEEE80211_STA_RX_BW_20; + case NL80211_CHAN_WIDTH_40: + return IEEE80211_STA_RX_BW_40; + case NL80211_CHAN_WIDTH_80: + return IEEE80211_STA_RX_BW_80; + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_80P80: + return IEEE80211_STA_RX_BW_160; + case NL80211_CHAN_WIDTH_320: + return IEEE80211_STA_RX_BW_320; + } +} + /* for older drivers - let's not document these ... 
*/ int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx); diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index de47fa533b15..90f56656b572 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -267,7 +267,8 @@ struct gdma_event { struct gdma_queue; struct mana_eq { - struct gdma_queue *eq; + struct gdma_queue *eq; + struct dentry *mana_eq_debugfs; }; typedef void gdma_eq_callback(void *context, struct gdma_queue *q, @@ -365,6 +366,7 @@ struct gdma_irq_context { struct gdma_context { struct device *dev; + struct dentry *mana_pci_debugfs; /* Per-vPort max number of queues */ unsigned int max_num_queues; @@ -878,5 +880,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, u32 resp_len, void *resp); int mana_gd_destroy_dma_region(struct gdma_context *gc, u64 dma_region_handle); +void mana_register_debugfs(void); +void mana_unregister_debugfs(void); #endif /* _GDMA_H */ diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index f2a5200d8a0f..0d00b24eacaf 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -43,7 +43,7 @@ enum TRI_STATE { * size beyond this value gets rejected by __alloc_page() call. */ #define MAX_RX_BUFFERS_PER_QUEUE 8192 -#define DEF_RX_BUFFERS_PER_QUEUE 512 +#define DEF_RX_BUFFERS_PER_QUEUE 1024 #define MIN_RX_BUFFERS_PER_QUEUE 128 /* This max value for TX buffers is derived as the maximum allocatable @@ -350,6 +350,7 @@ struct mana_rxq { int xdp_rc; /* XDP redirect return code */ struct page_pool *page_pool; + struct dentry *mana_rx_debugfs; /* MUST BE THE LAST MEMBER: * Each receive buffer has an associated mana_recv_buf_oob. @@ -363,6 +364,8 @@ struct mana_tx_qp { struct mana_cq tx_cq; mana_handle_t tx_object; + + struct dentry *mana_tx_debugfs; }; struct mana_ethtool_stats { @@ -407,6 +410,7 @@ struct mana_context { u16 num_ports; struct mana_eq *eqs; + struct dentry *mana_eqs_debugfs; struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; }; @@ -468,6 +472,9 @@ struct mana_port_context { bool port_st_save; /* Saved port state */ struct mana_ethtool_stats eth_stats; + + /* Debugfs */ + struct dentry *mana_port_debugfs; }; netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev); @@ -494,6 +501,7 @@ int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues void mana_pre_dealloc_rxbufs(struct mana_port_context *apc); extern const struct ethtool_ops mana_ethtool_ops; +extern struct dentry *mana_debugfs_root; /* A CQ can be created not associated with any EQ */ #define GDMA_CQ_NO_EQ 0xffff diff --git a/include/net/mctp.h b/include/net/mctp.h index 28d59ae94ca3..1ecbff7116f6 100644 --- a/include/net/mctp.h +++ b/include/net/mctp.h @@ -298,4 +298,22 @@ void mctp_routes_exit(void); int mctp_device_init(void); void mctp_device_exit(void); +/* MCTP IDs and Codes from DMTF specification + * "DSP0239 Management Component Transport Protocol (MCTP) IDs and Codes" + * https://www.dmtf.org/sites/default/files/standards/documents/DSP0239_1.11.1.pdf + */ +enum mctp_phys_binding { + MCTP_PHYS_BINDING_UNSPEC = 0x00, + MCTP_PHYS_BINDING_SMBUS = 0x01, + MCTP_PHYS_BINDING_PCIE_VDM = 0x02, + MCTP_PHYS_BINDING_USB = 0x03, + MCTP_PHYS_BINDING_KCS = 0x04, + MCTP_PHYS_BINDING_SERIAL = 0x05, + MCTP_PHYS_BINDING_I3C = 0x06, + MCTP_PHYS_BINDING_MMBI = 0x07, + MCTP_PHYS_BINDING_PCC = 0x08, + MCTP_PHYS_BINDING_UCIE = 0x09, + MCTP_PHYS_BINDING_VENDOR = 0xFF, +}; + #endif /* __NET_MCTP_H */ diff --git a/include/net/mctpdevice.h 
b/include/net/mctpdevice.h index 5c0d04b5c12c..957d9ef924c5 100644 --- a/include/net/mctpdevice.h +++ b/include/net/mctpdevice.h @@ -22,6 +22,7 @@ struct mctp_dev { refcount_t refs; unsigned int net; + enum mctp_phys_binding binding; const struct mctp_netdev_ops *ops; @@ -44,7 +45,8 @@ struct mctp_dev *mctp_dev_get_rtnl(const struct net_device *dev); struct mctp_dev *__mctp_dev_get(const struct net_device *dev); int mctp_register_netdev(struct net_device *dev, - const struct mctp_netdev_ops *ops); + const struct mctp_netdev_ops *ops, + enum mctp_phys_binding binding); void mctp_unregister_netdev(struct net_device *dev); void mctp_dev_hold(struct mctp_dev *mdev); diff --git a/include/net/neighbour.h b/include/net/neighbour.h index a44f262a7384..9a832cab5b1d 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -29,6 +29,7 @@ #include <linux/sysctl.h> #include <linux/workqueue.h> #include <net/rtnetlink.h> +#include <net/neighbour_tables.h> /* * NUD stands for "neighbor unreachability detection" @@ -135,7 +136,8 @@ struct neigh_statistics { #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) struct neighbour { - struct neighbour __rcu *next; + struct hlist_node hash; + struct hlist_node dev_list; struct neigh_table *tbl; struct neigh_parms *parms; unsigned long confirmed; @@ -190,7 +192,7 @@ struct pneigh_entry { #define NEIGH_NUM_HASH_RND 4 struct neigh_hash_table { - struct neighbour __rcu **hash_buckets; + struct hlist_head *hash_heads; unsigned int hash_shift; __u32 hash_rnd[NEIGH_NUM_HASH_RND]; struct rcu_head rcu; @@ -236,14 +238,6 @@ struct neigh_table { struct pneigh_entry **phash_buckets; }; -enum { - NEIGH_ARP_TABLE = 0, - NEIGH_ND_TABLE = 1, - NEIGH_DN_TABLE = 2, - NEIGH_NR_TABLES, - NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */ -}; - static inline int neigh_parms_family(struct neigh_parms *p) { return p->tbl->family; @@ -276,6 +270,12 @@ static inline void *neighbour_priv(const struct neighbour *n) extern const struct nla_policy nda_policy[]; +#define neigh_for_each_in_bucket(pos, head) hlist_for_each_entry(pos, head, hash) +#define neigh_for_each_in_bucket_rcu(pos, head) \ + hlist_for_each_entry_rcu(pos, head, hash) +#define neigh_for_each_in_bucket_safe(pos, tmp, head) \ + hlist_for_each_entry_safe(pos, tmp, head, hash) + static inline bool neigh_key_eq32(const struct neighbour *n, const void *pkey) { return *(const u32 *)n->primary_key == *(const u32 *)pkey; @@ -304,12 +304,9 @@ static inline struct neighbour *___neigh_lookup_noref( u32 hash_val; hash_val = hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift); - for (n = rcu_dereference(nht->hash_buckets[hash_val]); - n != NULL; - n = rcu_dereference(n->next)) { + neigh_for_each_in_bucket_rcu(n, &nht->hash_heads[hash_val]) if (n->dev == dev && key_eq(n, pkey)) return n; - } return NULL; } @@ -350,7 +347,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb, int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags, u32 nlmsg_pid); void __neigh_set_probe_once(struct neighbour *neigh); -bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl); +bool neigh_remove_one(struct neighbour *ndel); void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev); int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev); int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev); diff --git a/include/net/neighbour_tables.h b/include/net/neighbour_tables.h new file mode 100644 index 
000000000000..bcffbe8f7601 --- /dev/null +++ b/include/net/neighbour_tables.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _NET_NEIGHBOUR_TABLES_H +#define _NET_NEIGHBOUR_TABLES_H + +enum { + NEIGH_ARP_TABLE = 0, + NEIGH_ND_TABLE = 1, + NEIGH_NR_TABLES, + NEIGH_LINK_TABLE = NEIGH_NR_TABLES /* Pseudo table for neigh_xmit */ +}; + +#endif diff --git a/include/net/net_debug.h b/include/net/net_debug.h index 1e74684cbbdb..47f7a4a878b9 100644 --- a/include/net/net_debug.h +++ b/include/net/net_debug.h @@ -27,7 +27,7 @@ void netdev_info(const struct net_device *dev, const char *format, ...); #define netdev_level_once(level, dev, fmt, ...) \ do { \ - static bool __section(".data.once") __print_once; \ + static bool __section(".data..once") __print_once; \ \ if (!__print_once) { \ __print_once = true; \ @@ -149,9 +149,11 @@ do { \ #if defined(CONFIG_DEBUG_NET) -#define DEBUG_NET_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) +#define DEBUG_NET_WARN_ON_ONCE(cond) ((void)WARN_ON_ONCE(cond)) +#define DEBUG_NET_WARN_ONCE(cond, format...) ((void)WARN_ONCE(cond, format)) #else #define DEBUG_NET_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) +#define DEBUG_NET_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #endif #endif /* _LINUX_NET_DEBUG_H */ diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index e67b483cc8bb..873c0f9fdac6 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h @@ -188,6 +188,10 @@ struct net { #if IS_ENABLED(CONFIG_SMC) struct netns_smc smc; #endif +#ifdef CONFIG_DEBUG_NET_SMALL_RTNL + /* Move to a better place when the config guard is removed. */ + struct mutex rtnl_mutex; +#endif } __randomize_layout; #include <linux/seq_file_net.h> diff --git a/include/net/net_shaper.h b/include/net/net_shaper.h new file mode 100644 index 000000000000..5c3f49b52fe9 --- /dev/null +++ b/include/net/net_shaper.h @@ -0,0 +1,120 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef _NET_SHAPER_H_ +#define _NET_SHAPER_H_ + +#include <linux/types.h> + +#include <uapi/linux/net_shaper.h> + +struct net_device; +struct devlink; +struct netlink_ext_ack; + +enum net_shaper_binding_type { + NET_SHAPER_BINDING_TYPE_NETDEV, + /* NET_SHAPER_BINDING_TYPE_DEVLINK_PORT */ +}; + +struct net_shaper_binding { + enum net_shaper_binding_type type; + union { + struct net_device *netdev; + struct devlink *devlink; + }; +}; + +struct net_shaper_handle { + enum net_shaper_scope scope; + u32 id; +}; + +/** + * struct net_shaper - represents a shaping node on the NIC H/W + * zeroed fields are considered not set. + * @parent: Unique identifier for the shaper parent, usually implied + * @handle: Unique identifier for this shaper + * @metric: Specify if the rate limits refer to PPS or BPS + * @bw_min: Minimum guaranteed rate for this shaper + * @bw_max: Maximum peak rate allowed for this shaper + * @burst: Maximum burst for the peak rate of this shaper + * @priority: Scheduling priority for this shaper + * @weight: Scheduling weight for this shaper + */ +struct net_shaper { + struct net_shaper_handle parent; + struct net_shaper_handle handle; + enum net_shaper_metric metric; + u64 bw_min; + u64 bw_max; + u64 burst; + u32 priority; + u32 weight; + + /* private: */ + u32 leaves; /* accounted only for NODE scope */ + struct rcu_head rcu; +}; + +/** + * struct net_shaper_ops - Operations on device H/W shapers + * + * The operations apply to both net_device and devlink objects. 
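To make struct net_shaper above concrete, a minimal hedged sketch of a 100 Mbit/s cap on queue 0; the values are purely illustrative, and the scope and metric constants come from uapi/linux/net_shaper.h:

static const struct net_shaper queue0_cap = {
        .handle = {
                .scope = NET_SHAPER_SCOPE_QUEUE, /* per-queue shaper */
                .id    = 0,                      /* queue index */
        },
        .metric = NET_SHAPER_METRIC_BPS,
        .bw_max = 100 * 1000 * 1000, /* 100 Mbit/s peak rate */
};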
+ * The initial shaping configuration at device initialization is empty: + * it does not constrain the rate in any way. + * The network core keeps track of the applied user-configuration in + * the net_device or devlink structure. + * The operations are serialized via a per device lock. + * + * Devices not supporting any kind of nesting should not provide the + * group operation. + * + * Each shaper is uniquely identified within the device with a 'handle' + * comprising the shaper scope and a scope-specific id. + */ +struct net_shaper_ops { + /** + * @group: create the specified shapers scheduling group + * + * Nest the @leaves shapers identified under the @node shaper. + * All the shapers belong to the device specified by @binding. + * The @leaves array size is specified by @leaves_count. + * Create both the @leaves and the @node shapers; or, if they already + * exist, link them together in the desired way. + * @leaves scope must be NET_SHAPER_SCOPE_QUEUE. + */ + int (*group)(struct net_shaper_binding *binding, int leaves_count, + const struct net_shaper *leaves, + const struct net_shaper *node, + struct netlink_ext_ack *extack); + + /** + * @set: Updates the specified shaper + * + * Updates or creates the @shaper on the device specified by @binding. + */ + int (*set)(struct net_shaper_binding *binding, + const struct net_shaper *shaper, + struct netlink_ext_ack *extack); + + /** + * @delete: Removes the specified shaper + * + * Removes the shaper configuration as identified by the given @handle + * on the device specified by @binding, restoring the default behavior. + */ + int (*delete)(struct net_shaper_binding *binding, + const struct net_shaper_handle *handle, + struct netlink_ext_ack *extack); + + /** + * @capabilities: get the shaper features supported by the device + * + * Fills the bitmask @cap with the supported capabilities for the + * specified @scope and device specified by @binding. 
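On the driver side, a hedged sketch of wiring up the @set operation; the foo_* names are invented, only the callback signature comes from this header:

#include <net/net_shaper.h>

/* Hypothetical driver hook, sketch only: a real implementation would
 * program shaper->bw_min/bw_max into hardware; this one just logs.
 */
static int foo_shaper_set(struct net_shaper_binding *binding,
                          const struct net_shaper *shaper,
                          struct netlink_ext_ack *extack)
{
        netdev_dbg(binding->netdev, "scope %d id %u bw_max %llu\n",
                   shaper->handle.scope, shaper->handle.id,
                   shaper->bw_max);
        return 0;
}

static const struct net_shaper_ops foo_shaper_ops = {
        .set = foo_shaper_set,
};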
+ */ + void (*capabilities)(struct net_shaper_binding *binding, + enum net_shaper_scope scope, unsigned long *cap); +}; + +#endif diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 066a3ea33b12..80a537ac26cd 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1467,7 +1467,8 @@ struct nft_flowtable { struct nf_flowtable data; }; -struct nft_flowtable *nft_flowtable_lookup(const struct nft_table *table, +struct nft_flowtable *nft_flowtable_lookup(const struct net *net, + const struct nft_table *table, const struct nlattr *nla, u8 genmask); @@ -1758,28 +1759,29 @@ enum nft_trans_elem_flags { NFT_TRANS_UPD_EXPIRATION = (1 << 1), }; +struct nft_elem_update { + u64 timeout; + u64 expiration; + u8 flags; +}; + +struct nft_trans_one_elem { + struct nft_elem_priv *priv; + struct nft_elem_update *update; +}; + struct nft_trans_elem { struct nft_trans nft_trans; struct nft_set *set; - struct nft_elem_priv *elem_priv; - u64 timeout; - u64 expiration; - u8 update_flags; bool bound; + unsigned int nelems; + struct nft_trans_one_elem elems[] __counted_by(nelems); }; #define nft_trans_container_elem(t) \ container_of(t, struct nft_trans_elem, nft_trans) #define nft_trans_elem_set(trans) \ nft_trans_container_elem(trans)->set -#define nft_trans_elem_priv(trans) \ - nft_trans_container_elem(trans)->elem_priv -#define nft_trans_elem_update_flags(trans) \ - nft_trans_container_elem(trans)->update_flags -#define nft_trans_elem_timeout(trans) \ - nft_trans_container_elem(trans)->timeout -#define nft_trans_elem_expiration(trans) \ - nft_trans_container_elem(trans)->expiration #define nft_trans_elem_set_bound(trans) \ nft_trans_container_elem(trans)->bound diff --git a/include/net/netlabel.h b/include/net/netlabel.h index 529160f76cac..02914b1df38b 100644 --- a/include/net/netlabel.h +++ b/include/net/netlabel.h @@ -97,7 +97,7 @@ struct calipso_doi; /* NetLabel audit information */ struct netlbl_audit { - u32 secid; + struct lsm_prop prop; kuid_t loginuid; unsigned int sessionid; }; @@ -208,6 +208,7 @@ struct netlbl_lsm_secattr { * struct netlbl_calipso_ops - NetLabel CALIPSO operations * @doi_add: add a CALIPSO DOI * @doi_free: free a CALIPSO DOI + * @doi_remove: remove a CALIPSO DOI * @doi_getdef: returns a reference to a DOI * @doi_putdef: releases a reference of a DOI * @doi_walk: enumerate the DOI list diff --git a/include/net/netlink.h b/include/net/netlink.h index db6af207287c..39eaa6be6ca8 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -142,6 +142,8 @@ * nla_get_flag(nla) return 1 if flag is true * nla_get_msecs(nla) get payload for a msecs attribute * + * The same functions also exist with _default(). 
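A small hedged sketch of the new _default() pattern added below; the MY_ATTR_* enum and the helper are invented for illustration, only nla_get_u32_default() comes from this patch:

#include <net/netlink.h>

enum { MY_ATTR_UNSPEC, MY_ATTR_TIMEOUT, __MY_ATTR_MAX }; /* hypothetical */

static u32 my_parse_timeout(struct nlattr *tb[])
{
        /* Replaces the usual open-coded fallback:
         *   if (tb[MY_ATTR_TIMEOUT])
         *           timeout = nla_get_u32(tb[MY_ATTR_TIMEOUT]);
         *   else
         *           timeout = 30;
         */
        return nla_get_u32_default(tb[MY_ATTR_TIMEOUT], 30);
}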
+ * * Attribute Misc: * nla_memcpy(dest, nla, count) copy attribute into memory * nla_memcmp(nla, data, size) compare attribute with memory area @@ -469,6 +471,7 @@ struct nla_policy { .max = _len \ } #define NLA_POLICY_MIN_LEN(_len) NLA_POLICY_MIN(NLA_BINARY, _len) +#define NLA_POLICY_MAX_LEN(_len) NLA_POLICY_MAX(NLA_BINARY, _len) /** * struct nl_info - netlink source information @@ -1695,6 +1698,20 @@ static inline u32 nla_get_u32(const struct nlattr *nla) } /** + * nla_get_u32_default - return payload of u32 attribute or default + * @nla: u32 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline u32 nla_get_u32_default(const struct nlattr *nla, u32 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_u32(nla); +} + +/** * nla_get_be32 - return payload of __be32 attribute * @nla: __be32 netlink attribute */ @@ -1704,6 +1721,21 @@ static inline __be32 nla_get_be32(const struct nlattr *nla) } /** + * nla_get_be32_default - return payload of be32 attribute or default + * @nla: __be32 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline __be32 nla_get_be32_default(const struct nlattr *nla, + __be32 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_be32(nla); +} + +/** * nla_get_le32 - return payload of __le32 attribute * @nla: __le32 netlink attribute */ @@ -1713,6 +1745,21 @@ static inline __le32 nla_get_le32(const struct nlattr *nla) } /** + * nla_get_le32_default - return payload of le32 attribute or default + * @nla: __le32 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline __le32 nla_get_le32_default(const struct nlattr *nla, + __le32 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_le32(nla); +} + +/** * nla_get_u16 - return payload of u16 attribute * @nla: u16 netlink attribute */ @@ -1722,6 +1769,20 @@ static inline u16 nla_get_u16(const struct nlattr *nla) } /** + * nla_get_u16_default - return payload of u16 attribute or default + * @nla: u16 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline u16 nla_get_u16_default(const struct nlattr *nla, u16 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_u16(nla); +} + +/** * nla_get_be16 - return payload of __be16 attribute * @nla: __be16 netlink attribute */ @@ -1731,6 +1792,21 @@ static inline __be16 nla_get_be16(const struct nlattr *nla) } /** + * nla_get_be16_default - return payload of be16 attribute or default + * @nla: __be16 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline __be16 nla_get_be16_default(const struct nlattr *nla, + __be16 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_be16(nla); +} + +/** * nla_get_le16 - return payload of __le16 attribute * @nla: __le16 netlink attribute */ @@ -1740,6 +1816,21 @@ static inline __le16 nla_get_le16(const struct nlattr *nla) } /** + * nla_get_le16_default - return payload of le16 attribute or default + * @nla: __le16 netlink attribute, may be %NULL + * @defvalue: default value to 
use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline __le16 nla_get_le16_default(const struct nlattr *nla, + __le16 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_le16(nla); +} + +/** * nla_get_u8 - return payload of u8 attribute * @nla: u8 netlink attribute */ @@ -1749,6 +1840,20 @@ static inline u8 nla_get_u8(const struct nlattr *nla) } /** + * nla_get_u8_default - return payload of u8 attribute or default + * @nla: u8 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline u8 nla_get_u8_default(const struct nlattr *nla, u8 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_u8(nla); +} + +/** * nla_get_u64 - return payload of u64 attribute * @nla: u64 netlink attribute */ @@ -1762,6 +1867,20 @@ static inline u64 nla_get_u64(const struct nlattr *nla) } /** + * nla_get_u64_default - return payload of u64 attribute or default + * @nla: u64 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline u64 nla_get_u64_default(const struct nlattr *nla, u64 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_u64(nla); +} + +/** * nla_get_uint - return payload of uint attribute * @nla: uint netlink attribute */ @@ -1773,6 +1892,20 @@ static inline u64 nla_get_uint(const struct nlattr *nla) } /** + * nla_get_uint_default - return payload of uint attribute or default + * @nla: uint netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline u64 nla_get_uint_default(const struct nlattr *nla, u64 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_uint(nla); +} + +/** * nla_get_be64 - return payload of __be64 attribute * @nla: __be64 netlink attribute */ @@ -1786,6 +1919,21 @@ static inline __be64 nla_get_be64(const struct nlattr *nla) } /** + * nla_get_be64_default - return payload of be64 attribute or default + * @nla: __be64 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline __be64 nla_get_be64_default(const struct nlattr *nla, + __be64 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_be64(nla); +} + +/** * nla_get_le64 - return payload of __le64 attribute * @nla: __le64 netlink attribute */ @@ -1795,6 +1943,21 @@ static inline __le64 nla_get_le64(const struct nlattr *nla) } /** + * nla_get_le64_default - return payload of le64 attribute or default + * @nla: __le64 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline __le64 nla_get_le64_default(const struct nlattr *nla, + __le64 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_le64(nla); +} + +/** * nla_get_s32 - return payload of s32 attribute * @nla: s32 netlink attribute */ @@ -1804,6 +1967,20 @@ static inline s32 nla_get_s32(const struct nlattr *nla) } /** + * nla_get_s32_default - return payload of s32 attribute or default + * @nla: s32 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default 
value if not present + */ +static inline s32 nla_get_s32_default(const struct nlattr *nla, s32 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_s32(nla); +} + +/** * nla_get_s16 - return payload of s16 attribute * @nla: s16 netlink attribute */ @@ -1813,6 +1990,20 @@ static inline s16 nla_get_s16(const struct nlattr *nla) } /** + * nla_get_s16_default - return payload of s16 attribute or default + * @nla: s16 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline s16 nla_get_s16_default(const struct nlattr *nla, s16 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_s16(nla); +} + +/** * nla_get_s8 - return payload of s8 attribute * @nla: s8 netlink attribute */ @@ -1822,6 +2013,20 @@ static inline s8 nla_get_s8(const struct nlattr *nla) } /** + * nla_get_s8_default - return payload of s8 attribute or default + * @nla: s8 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline s8 nla_get_s8_default(const struct nlattr *nla, s8 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_s8(nla); +} + +/** * nla_get_s64 - return payload of s64 attribute * @nla: s64 netlink attribute */ @@ -1835,6 +2040,20 @@ static inline s64 nla_get_s64(const struct nlattr *nla) } /** + * nla_get_s64_default - return payload of s64 attribute or default + * @nla: s64 netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline s64 nla_get_s64_default(const struct nlattr *nla, s64 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_s64(nla); +} + +/** * nla_get_sint - return payload of uint attribute * @nla: uint netlink attribute */ @@ -1846,6 +2065,20 @@ static inline s64 nla_get_sint(const struct nlattr *nla) } /** + * nla_get_sint_default - return payload of sint attribute or default + * @nla: sint netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline s64 nla_get_sint_default(const struct nlattr *nla, s64 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_sint(nla); +} + +/** * nla_get_flag - return payload of flag attribute * @nla: flag netlink attribute */ @@ -1868,6 +2101,21 @@ static inline unsigned long nla_get_msecs(const struct nlattr *nla) } /** + * nla_get_msecs_default - return payload of msecs attribute or default + * @nla: msecs netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline unsigned long nla_get_msecs_default(const struct nlattr *nla, + unsigned long defvalue) +{ + if (!nla) + return defvalue; + return nla_get_msecs(nla); +} + +/** * nla_get_in_addr - return payload of IPv4 address attribute * @nla: IPv4 address netlink attribute */ @@ -1877,6 +2125,21 @@ static inline __be32 nla_get_in_addr(const struct nlattr *nla) } /** + * nla_get_in_addr_default - return payload of be32 attribute or default + * @nla: IPv4 address netlink attribute, may be %NULL + * @defvalue: default value to use if @nla is %NULL + * + * Return: the value of the attribute, or the default value if not present + */ +static inline __be32 
nla_get_in_addr_default(const struct nlattr *nla, + __be32 defvalue) +{ + if (!nla) + return defvalue; + return nla_get_in_addr(nla); +} + +/** * nla_get_in6_addr - return payload of IPv6 address attribute * @nla: IPv6 address netlink attribute */ diff --git a/include/net/netns/core.h b/include/net/netns/core.h index 78214f1b43a2..9b36f0ff0c20 100644 --- a/include/net/netns/core.h +++ b/include/net/netns/core.h @@ -15,6 +15,7 @@ struct netns_core { int sysctl_somaxconn; int sysctl_optmem_max; u8 sysctl_txrehash; + u8 sysctl_tstamp_allow_data; #ifdef CONFIG_PROC_FS struct prot_inuse __percpu *prot_inuse; diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 276f622f3516..3c014170e001 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -76,6 +76,8 @@ struct netns_ipv4 { __cacheline_group_begin(netns_ipv4_read_rx); u8 sysctl_ip_early_demux; u8 sysctl_tcp_early_demux; + u8 sysctl_tcp_l3mdev_accept; + /* 3 bytes hole, try to pack */ int sysctl_tcp_reordering; int sysctl_tcp_rmem[3]; __cacheline_group_end(netns_ipv4_read_rx); @@ -151,9 +153,6 @@ struct netns_ipv4 { u8 sysctl_fwmark_reflect; u8 sysctl_tcp_fwmark_accept; -#ifdef CONFIG_NET_L3_MASTER_DEV - u8 sysctl_tcp_l3mdev_accept; -#endif u8 sysctl_tcp_mtu_probing; int sysctl_tcp_mtu_probe_floor; int sysctl_tcp_base_mss; @@ -263,12 +262,14 @@ struct netns_ipv4 { #endif struct fib_notifier_ops *notifier_ops; - unsigned int fib_seq; /* protected by rtnl_mutex */ + unsigned int fib_seq; /* writes protected by rtnl_mutex */ struct fib_notifier_ops *ipmr_notifier_ops; unsigned int ipmr_seq; /* protected by rtnl_mutex */ atomic_t rt_genid; siphash_key_t ip_id_key; + struct hlist_head *inet_addr_lst; + struct delayed_work addr_chk_work; }; #endif diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index ae60d6664095..23dd647fe024 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -43,6 +43,7 @@ struct netns_xfrm { struct hlist_head __rcu *state_bysrc; struct hlist_head __rcu *state_byspi; struct hlist_head __rcu *state_byseq; + struct hlist_head __percpu *state_cache_input; unsigned int state_hmask; unsigned int state_num; struct work_struct state_hash_work; diff --git a/include/net/nfc/nci.h b/include/net/nfc/nci.h index dc36519d16aa..09efcaed7c3f 100644 --- a/include/net/nfc/nci.h +++ b/include/net/nfc/nci.h @@ -475,7 +475,7 @@ struct nci_rf_discover_ntf { #define NCI_OP_RF_INTF_ACTIVATED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x05) struct activation_params_nfca_poll_iso_dep { __u8 rats_res_len; - __u8 rats_res[20]; + __u8 rats_res[NFC_ATS_MAXSIZE]; }; struct activation_params_nfcb_poll_iso_dep { diff --git a/include/net/nfc/nci_core.h b/include/net/nfc/nci_core.h index ea8595651c38..e180bdf2f82b 100644 --- a/include/net/nfc/nci_core.h +++ b/include/net/nfc/nci_core.h @@ -265,6 +265,10 @@ struct nci_dev { /* stored during intf_activated_ntf */ __u8 remote_gb[NFC_MAX_GT_LEN]; __u8 remote_gb_len; + + /* stored during intf_activated_ntf */ + __u8 target_ats[NFC_ATS_MAXSIZE]; + __u8 target_ats_len; }; /* ----- NCI Devices ----- */ diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 3a3781838c67..127e6c7d910d 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -86,6 +86,8 @@ struct nfc_ops { * is a type A one. The %sens_res most significant byte must be byte 2 * as described by the NFC Forum digital specification (i.e. the platform * configuration one) while %sens_res least significant byte is byte 1. 
+ * @ats_len: length of Answer To Select in bytes + * @ats: Answer To Select returned by an ISO 14443 Type A target upon activation */ struct nfc_target { u32 idx; @@ -105,6 +107,8 @@ struct nfc_target { u8 is_iso15693; u8 iso15693_dsfid; u8 iso15693_uid[NFC_ISO15693_UID_MAXSIZE]; + u8 ats_len; + u8 ats[NFC_ATS_MAXSIZE]; }; /** diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h index e9dc8dca5817..37a3e83531c6 100644 --- a/include/net/phonet/pn_dev.h +++ b/include/net/phonet/pn_dev.h @@ -11,13 +11,13 @@ #define PN_DEV_H #include <linux/list.h> -#include <linux/mutex.h> +#include <linux/spinlock.h> struct net; struct phonet_device_list { struct list_head list; - struct mutex lock; + spinlock_t lock; }; struct phonet_device_list *phonet_device_list(struct net *net); @@ -38,11 +38,11 @@ int phonet_address_add(struct net_device *dev, u8 addr); int phonet_address_del(struct net_device *dev, u8 addr); u8 phonet_address_get(struct net_device *dev, u8 addr); int phonet_address_lookup(struct net *net, u8 addr); -void phonet_address_notify(int event, struct net_device *dev, u8 addr); +void phonet_address_notify(struct net *net, int event, u32 ifindex, u8 addr); int phonet_route_add(struct net_device *dev, u8 daddr); int phonet_route_del(struct net_device *dev, u8 daddr); -void rtm_phonet_notify(int event, struct net_device *dev, u8 dst); +void rtm_phonet_notify(struct net *net, int event, u32 ifindex, u8 dst); struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr); struct net_device *phonet_route_output(struct net *net, u8 daddr); diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index 4880b3a7aced..cf199af85c52 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h @@ -755,6 +755,7 @@ tc_cls_common_offload_init(struct flow_cls_common_offload *cls_common, cls_common->chain_index = tp->chain->index; cls_common->protocol = tp->protocol; cls_common->prio = tp->prio >> 16; + cls_common->skip_sw = tc_skip_sw(flags); if (tc_skip_sw(flags) || flags & TCA_CLS_FLAGS_VERBOSE) cls_common->extack = extack; } diff --git a/include/net/route.h b/include/net/route.h index 1789f1e6640b..84cb1e04f5cd 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -156,12 +156,12 @@ static inline struct rtable *ip_route_output_key(struct net *net, struct flowi4 * structure is only partially set, it may bypass some fib-rules. 
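The route.h hunk below moves these helpers from a raw tos byte to dscp_t and from errno to skb_drop_reason. A hedged caller-side sketch; the rx function is invented, and inet_dsfield_to_dscp() is assumed to be the usual conversion helper from <net/inet_dscp.h>:

#include <linux/ip.h>
#include <net/inet_dscp.h>
#include <net/route.h>

static void rx_route_sketch(struct sk_buff *skb, struct net_device *dev)
{
        const struct iphdr *iph = ip_hdr(skb);
        enum skb_drop_reason reason;

        reason = ip_route_input(skb, iph->daddr, iph->saddr,
                                inet_dsfield_to_dscp(iph->tos), dev);
        if (reason) {
                /* SKB_NOT_DROPPED_YET == 0, so any non-zero value is a
                 * concrete drop cause for kfree_skb_reason() tracing.
                 */
                kfree_skb_reason(skb, reason);
                return;
        }
        /* dst attached, continue input processing */
}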
*/ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr, - __be32 saddr, u8 tos, int oif, - __u8 scope) + __be32 saddr, dscp_t dscp, + int oif, __u8 scope) { struct flowi4 fl4 = { .flowi4_oif = oif, - .flowi4_tos = tos, + .flowi4_tos = inet_dscp_to_dsfield(dscp), .flowi4_scope = scope, .daddr = daddr, .saddr = saddr, @@ -198,30 +198,35 @@ static inline struct rtable *ip_route_output_gre(struct net *net, struct flowi4 fl4->fl4_gre_key = gre_key; return ip_route_output_key(net, fl4); } -int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, - u8 tos, struct net_device *dev, - struct in_device *in_dev, u32 *itag); -int ip_route_input_noref(struct sk_buff *skb, __be32 dst, __be32 src, - u8 tos, struct net_device *devin); -int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src, - u8 tos, struct net_device *devin, - const struct sk_buff *hint); - -static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, - u8 tos, struct net_device *devin) + +enum skb_drop_reason +ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + struct in_device *in_dev, u32 *itag); +enum skb_drop_reason +ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev); +enum skb_drop_reason +ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr, + dscp_t dscp, struct net_device *dev, + const struct sk_buff *hint); + +static inline enum skb_drop_reason +ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src, dscp_t dscp, + struct net_device *devin) { - int err; + enum skb_drop_reason reason; rcu_read_lock(); - err = ip_route_input_noref(skb, dst, src, tos, devin); - if (!err) { + reason = ip_route_input_noref(skb, dst, src, dscp, devin); + if (!reason) { skb_dst_force(skb); if (!skb_dst(skb)) - err = -EINVAL; + reason = SKB_DROP_REASON_NOT_SPECIFIED; } rcu_read_unlock(); - return err; + return reason; } void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu, int oif, diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index 2d3eb7cb4dff..bc0069a8b6ea 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -3,6 +3,7 @@ #define __NET_RTNETLINK_H #include <linux/rtnetlink.h> +#include <linux/srcu.h> #include <net/netlink.h> typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *, @@ -11,6 +12,8 @@ typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *); enum rtnl_link_flags { RTNL_FLAG_DOIT_UNLOCKED = BIT(0), +#define RTNL_FLAG_DOIT_PERNET RTNL_FLAG_DOIT_UNLOCKED +#define RTNL_FLAG_DOIT_PERNET_WIP RTNL_FLAG_DOIT_UNLOCKED RTNL_FLAG_BULK_DEL_SUPPORTED = BIT(1), RTNL_FLAG_DUMP_UNLOCKED = BIT(2), RTNL_FLAG_DUMP_SPLIT_NLM_DONE = BIT(3), /* legacy behavior */ @@ -29,6 +32,16 @@ static inline enum rtnl_kinds rtnl_msgtype_kind(int msgtype) return msgtype & RTNL_KIND_MASK; } +/** + * struct rtnl_msg_handler - rtnetlink message type and handlers + * + * @owner: NULL for built-in, THIS_MODULE for module + * @protocol: Protocol family or PF_UNSPEC + * @msgtype: rtnetlink message type + * @doit: Function pointer called for each request message + * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message + * @flags: rtnl_link_flags to modify behaviour of doit/dumpit functions + */ struct rtnl_msg_handler { struct module *owner; int protocol; @@ -38,11 +51,6 @@ struct rtnl_msg_handler { int flags; }; -void rtnl_register(int protocol, int msgtype, - rtnl_doit_func, 
rtnl_dumpit_func, unsigned int flags); -int rtnl_register_module(struct module *owner, int protocol, int msgtype, - rtnl_doit_func, rtnl_dumpit_func, unsigned int flags); -int rtnl_unregister(int protocol, int msgtype); void rtnl_unregister_all(int protocol); int __rtnl_register_many(const struct rtnl_msg_handler *handlers, int n); @@ -64,9 +72,11 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) /** * struct rtnl_link_ops - rtnetlink link operations * - * @list: Used internally + * @list: Used internally, protected by link_ops_mutex and SRCU + * @srcu: Used internally * @kind: Identifier * @netns_refund: Physical device, move to init_net on netns exit + * @peer_type: Peer device specific netlink attribute number (e.g. VETH_INFO_PEER) * @maxtype: Highest device specific netlink attribute number * @policy: Netlink policy for device specific attribute validation * @validate: Optional validation function for netlink/changelink parameters @@ -95,6 +105,7 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh) */ struct rtnl_link_ops { struct list_head list; + struct srcu_struct srcu; const char *kind; @@ -107,6 +118,7 @@ struct rtnl_link_ops { void (*setup)(struct net_device *dev); bool netns_refund; + const u16 peer_type; unsigned int maxtype; const struct nla_policy *policy; int (*validate)(struct nlattr *tb[], @@ -155,16 +167,14 @@ struct rtnl_link_ops { int *prividx, int attr); }; -int __rtnl_link_register(struct rtnl_link_ops *ops); -void __rtnl_link_unregister(struct rtnl_link_ops *ops); - int rtnl_link_register(struct rtnl_link_ops *ops); void rtnl_link_unregister(struct rtnl_link_ops *ops); /** * struct rtnl_af_ops - rtnetlink address family operations * - * @list: Used internally + * @list: Used internally, protected by RTNL and SRCU + * @srcu: Used internally * @family: Address family * @fill_link_af: Function to fill IFLA_AF_SPEC with address family * specific netlink attributes. @@ -177,6 +187,8 @@ void rtnl_link_unregister(struct rtnl_link_ops *ops); */ struct rtnl_af_ops { struct list_head list; + struct srcu_struct srcu; + int family; int (*fill_link_af)(struct sk_buff *skb, @@ -196,7 +208,7 @@ struct rtnl_af_ops { size_t (*get_stats_af_size)(const struct net_device *dev); }; -void rtnl_af_register(struct rtnl_af_ops *ops); +int rtnl_af_register(struct rtnl_af_ops *ops); void rtnl_af_unregister(struct rtnl_af_ops *ops); struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]); diff --git a/include/net/sock.h b/include/net/sock.h index f29c14448938..7464e9f9f47c 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -956,6 +956,12 @@ enum sock_flags { }; #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE)) +/* + * The highest bit of sk_tsflags is reserved for kernel-internal + * SOCKCM_FLAG_TS_OPT_ID. 
There is a check in core/sock.c to control that + * SOF_TIMESTAMPING* values do not reach this reserved area + */ +#define SOCKCM_FLAG_TS_OPT_ID BIT(31) static inline void sock_copy_flags(struct sock *nsk, const struct sock *osk) { @@ -1754,6 +1760,15 @@ void sock_efree(struct sk_buff *skb); #ifdef CONFIG_INET void sock_edemux(struct sk_buff *skb); void sock_pfree(struct sk_buff *skb); + +static inline void skb_set_owner_edemux(struct sk_buff *skb, struct sock *sk) +{ + skb_orphan(skb); + if (refcount_inc_not_zero(&sk->sk_refcnt)) { + skb->sk = sk; + skb->destructor = sock_edemux; + } +} #else #define sock_edemux sock_efree #endif @@ -1798,6 +1813,7 @@ struct sockcm_cookie { u64 transmit_time; u32 mark; u32 tsflags; + u32 ts_opt_id; }; static inline void sockcm_init(struct sockcm_cookie *sockc, @@ -2655,39 +2671,48 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk, sock_write_timestamp(sk, 0); } -void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags); +void __sock_tx_timestamp(__u32 tsflags, __u8 *tx_flags); /** * _sock_tx_timestamp - checks whether the outgoing packet is to be time stamped * @sk: socket sending this packet - * @tsflags: timestamping flags to use + * @sockc: pointer to socket cmsg cookie to get timestamping info * @tx_flags: completed with instructions for time stamping * @tskey: filled in with next sk_tskey (not for TCP, which uses seqno) * * Note: callers should take care of initial ``*tx_flags`` value (usually 0) */ -static inline void _sock_tx_timestamp(struct sock *sk, __u16 tsflags, +static inline void _sock_tx_timestamp(struct sock *sk, + const struct sockcm_cookie *sockc, __u8 *tx_flags, __u32 *tskey) { + __u32 tsflags = sockc->tsflags; + if (unlikely(tsflags)) { __sock_tx_timestamp(tsflags, tx_flags); if (tsflags & SOF_TIMESTAMPING_OPT_ID && tskey && - tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) - *tskey = atomic_inc_return(&sk->sk_tskey) - 1; + tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK) { + if (tsflags & SOCKCM_FLAG_TS_OPT_ID) + *tskey = sockc->ts_opt_id; + else + *tskey = atomic_inc_return(&sk->sk_tskey) - 1; + } } if (unlikely(sock_flag(sk, SOCK_WIFI_STATUS))) *tx_flags |= SKBTX_WIFI_STATUS; } -static inline void sock_tx_timestamp(struct sock *sk, __u16 tsflags, +static inline void sock_tx_timestamp(struct sock *sk, + const struct sockcm_cookie *sockc, __u8 *tx_flags) { - _sock_tx_timestamp(sk, tsflags, tx_flags, NULL); + _sock_tx_timestamp(sk, sockc, tx_flags, NULL); } -static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags) +static inline void skb_setup_tx_timestamp(struct sk_buff *skb, + const struct sockcm_cookie *sockc) { - _sock_tx_timestamp(skb->sk, tsflags, &skb_shinfo(skb)->tx_flags, + _sock_tx_timestamp(skb->sk, sockc, &skb_shinfo(skb)->tx_flags, &skb_shinfo(skb)->tskey); } @@ -2791,6 +2816,16 @@ static inline bool sk_listener(const struct sock *sk) return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV); } +/* This helper checks if a socket is a LISTEN or NEW_SYN_RECV or TIME_WAIT + * TCP SYNACK messages can be attached to LISTEN or NEW_SYN_RECV (depending on SYNCOOKIE) + * TCP RST and ACK can be attached to TIME_WAIT. 
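Putting the sockcm pieces above together, a hedged sketch of a sender pinning its own timestamp key instead of consuming sk_tskey; want_fixed_key and my_key are invented names:

#include <net/sock.h>

static void tx_ts_sketch(struct sock *sk, struct sk_buff *skb,
                         bool want_fixed_key, u32 my_key)
{
        struct sockcm_cookie sockc = {
                .tsflags = READ_ONCE(sk->sk_tsflags),
        };

        if (want_fixed_key) {
                /* Kernel-internal flag: use ts_opt_id as the tskey
                 * instead of post-incrementing sk->sk_tskey.
                 */
                sockc.tsflags |= SOCKCM_FLAG_TS_OPT_ID;
                sockc.ts_opt_id = my_key;
        }
        skb_setup_tx_timestamp(skb, &sockc);
}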
+ */ +static inline bool sk_listener_or_tw(const struct sock *sk) +{ + return (1 << READ_ONCE(sk->sk_state)) & + (TCPF_LISTEN | TCPF_NEW_SYN_RECV | TCPF_TIME_WAIT); +} + void sock_enable_timestamp(struct sock *sk, enum sock_flags flag); int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level, int type); @@ -2815,8 +2850,6 @@ void sk_get_meminfo(const struct sock *sk, u32 *meminfo); extern __u32 sysctl_wmem_max; extern __u32 sysctl_rmem_max; -extern int sysctl_tstamp_allow_data; - extern __u32 sysctl_wmem_default; extern __u32 sysctl_rmem_default; diff --git a/include/net/tcp.h b/include/net/tcp.h index d1948d357dad..e9b37b76e894 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2430,6 +2430,20 @@ void tcp_plb_update_state(const struct sock *sk, struct tcp_plb_state *plb, void tcp_plb_check_rehash(struct sock *sk, struct tcp_plb_state *plb); void tcp_plb_update_state_upon_rto(struct sock *sk, struct tcp_plb_state *plb); +static inline void tcp_warn_once(const struct sock *sk, bool cond, const char *str) +{ + WARN_ONCE(cond, + "%scwn:%u out:%u sacked:%u lost:%u retrans:%u tlp_high_seq:%u sk_state:%u ca_state:%u advmss:%u mss_cache:%u pmtu:%u\n", + str, + tcp_snd_cwnd(tcp_sk(sk)), + tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out, + tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out, + tcp_sk(sk)->tlp_high_seq, sk->sk_state, + inet_csk(sk)->icsk_ca_state, + tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache, + inet_csk(sk)->icsk_pmtu_cookie); +} + /* At how many usecs into the future should the RTO fire? */ static inline s64 tcp_rto_delta_us(const struct sock *sk) { @@ -2441,17 +2455,7 @@ static inline s64 tcp_rto_delta_us(const struct sock *sk) return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; } else { - WARN_ONCE(1, - "rtx queue emtpy: " - "out:%u sacked:%u lost:%u retrans:%u " - "tlp_high_seq:%u sk_state:%u ca_state:%u " - "advmss:%u mss_cache:%u pmtu:%u\n", - tcp_sk(sk)->packets_out, tcp_sk(sk)->sacked_out, - tcp_sk(sk)->lost_out, tcp_sk(sk)->retrans_out, - tcp_sk(sk)->tlp_high_seq, sk->sk_state, - inet_csk(sk)->icsk_ca_state, - tcp_sk(sk)->advmss, tcp_sk(sk)->mss_cache, - inet_csk(sk)->icsk_pmtu_cookie); + tcp_warn_once(sk, 1, "rtx queue empty: "); return jiffies_to_usecs(rto); } diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h index 1d46460d0fef..df655ce6987d 100644 --- a/include/net/tcp_ao.h +++ b/include/net/tcp_ao.h @@ -183,7 +183,8 @@ int tcp_ao_hash_skb(unsigned short int family, const u8 *tkey, int hash_offset, u32 sne); int tcp_parse_ao(struct sock *sk, int cmd, unsigned short int family, sockptr_t optval, int optlen); -struct tcp_ao_key *tcp_ao_established_key(struct tcp_ao_info *ao, +struct tcp_ao_key *tcp_ao_established_key(const struct sock *sk, + struct tcp_ao_info *ao, int sndid, int rcvid); int tcp_ao_copy_all_matching(const struct sock *sk, struct sock *newsk, struct request_sock *req, struct sk_buff *skb, diff --git a/include/net/udp.h b/include/net/udp.h index 61222545ab1c..6e89520e100d 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -50,29 +50,56 @@ struct udp_skb_cb { #define UDP_SKB_CB(__skb) ((struct udp_skb_cb *)((__skb)->cb)) /** - * struct udp_hslot - UDP hash slot + * struct udp_hslot - UDP hash slot used by udp_table.hash/hash4 * * @head: head of list of sockets + * @nulls_head: head of list of sockets, only used by hash4 * @count: number of sockets in 'head' list * @lock: spinlock protecting changes to head/count */ struct udp_hslot { - struct hlist_head head; + union { + struct hlist_head head; + /* hash4 uses hlist_nulls 
to avoid moving wrongly onto another + * hlist, because rehash() can happen with lookup(). + */ + struct hlist_nulls_head nulls_head; + }; int count; spinlock_t lock; -} __attribute__((aligned(2 * sizeof(long)))); +} __aligned(2 * sizeof(long)); + +/** + * struct udp_hslot_main - UDP hash slot used by udp_table.hash2 + * + * @hslot: basic hash slot + * @hash4_cnt: number of sockets in hslot4 of the same + * (local port, local address) + */ +struct udp_hslot_main { + struct udp_hslot hslot; /* must be the first member */ +#if !IS_ENABLED(CONFIG_BASE_SMALL) + u32 hash4_cnt; +#endif +} __aligned(2 * sizeof(long)); +#define UDP_HSLOT_MAIN(__hslot) ((struct udp_hslot_main *)(__hslot)) /** * struct udp_table - UDP table * * @hash: hash table, sockets are hashed on (local port) * @hash2: hash table, sockets are hashed on (local port, local address) + * @hash4: hash table, connected sockets are hashed on + * (local port, local address, remote port, remote address) * @mask: number of slots in hash tables, minus 1 * @log: log2(number of slots in hash table) */ struct udp_table { struct udp_hslot *hash; - struct udp_hslot *hash2; + struct udp_hslot_main *hash2; +#if !IS_ENABLED(CONFIG_BASE_SMALL) + struct udp_hslot *hash4; +#endif unsigned int mask; unsigned int log; }; @@ -84,6 +111,7 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table, { return &table->hash[udp_hashfn(net, num, table->mask)]; } + /* * For secondary hash, net_hash_mix() is performed before calling * udp_hashslot2(), this explains difference with udp_hashslot() @@ -91,8 +119,89 @@ static inline struct udp_hslot *udp_hashslot(struct udp_table *table, static inline struct udp_hslot *udp_hashslot2(struct udp_table *table, unsigned int hash) { - return &table->hash2[hash & table->mask]; + return &table->hash2[hash & table->mask].hslot; +} + +#if IS_ENABLED(CONFIG_BASE_SMALL) +static inline void udp_table_hash4_init(struct udp_table *table) +{ +} + +static inline struct udp_hslot *udp_hashslot4(struct udp_table *table, + unsigned int hash) +{ + BUILD_BUG(); + return NULL; +} + +static inline bool udp_hashed4(const struct sock *sk) +{ + return false; +} + +static inline unsigned int udp_hash4_slot_size(void) +{ + return 0; +} + +static inline bool udp_has_hash4(const struct udp_hslot *hslot2) +{ + return false; +} + +static inline void udp_hash4_inc(struct udp_hslot *hslot2) +{ +} + +static inline void udp_hash4_dec(struct udp_hslot *hslot2) +{ } +#else /* !CONFIG_BASE_SMALL */ + +/* Must be called with table->hash2 initialized */ +static inline void udp_table_hash4_init(struct udp_table *table) +{ + table->hash4 = (void *)(table->hash2 + (table->mask + 1)); + for (int i = 0; i <= table->mask; i++) { + table->hash2[i].hash4_cnt = 0; + + INIT_HLIST_NULLS_HEAD(&table->hash4[i].nulls_head, i); + table->hash4[i].count = 0; + spin_lock_init(&table->hash4[i].lock); + } +} + +static inline struct udp_hslot *udp_hashslot4(struct udp_table *table, + unsigned int hash) +{ + return &table->hash4[hash & table->mask]; +} + +static inline bool udp_hashed4(const struct sock *sk) +{ + return !hlist_nulls_unhashed(&udp_sk(sk)->udp_lrpa_node); +} + +static inline unsigned int udp_hash4_slot_size(void) +{ + return sizeof(struct udp_hslot); +} + +static inline bool udp_has_hash4(const struct udp_hslot *hslot2) +{ + return UDP_HSLOT_MAIN(hslot2)->hash4_cnt; +} + +static inline void udp_hash4_inc(struct udp_hslot *hslot2) +{ + UDP_HSLOT_MAIN(hslot2)->hash4_cnt++; +} + +static inline void udp_hash4_dec(struct udp_hslot *hslot2) +{ + 
UDP_HSLOT_MAIN(hslot2)->hash4_cnt--; +} +#endif /* CONFIG_BASE_SMALL */ extern struct proto udp_prot; @@ -193,13 +302,29 @@ static inline int udp_lib_hash(struct sock *sk) } void udp_lib_unhash(struct sock *sk); -void udp_lib_rehash(struct sock *sk, u16 new_hash); +void udp_lib_rehash(struct sock *sk, u16 new_hash, u16 new_hash4); +u32 udp_ehashfn(const struct net *net, const __be32 laddr, const __u16 lport, + const __be32 faddr, const __be16 fport); static inline void udp_lib_close(struct sock *sk, long timeout) { sk_common_release(sk); } +/* hash4 routines shared between UDPv4/6 */ +#if IS_ENABLED(CONFIG_BASE_SMALL) +static inline void udp_lib_hash4(struct sock *sk, u16 hash) +{ +} + +static inline void udp4_hash4(struct sock *sk) +{ +} +#else /* !CONFIG_BASE_SMALL */ +void udp_lib_hash4(struct sock *sk, u16 hash); +void udp4_hash4(struct sock *sk); +#endif /* CONFIG_BASE_SMALL */ + int udp_lib_get_port(struct sock *sk, unsigned short snum, unsigned int hash2_nulladdr); diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h index 0a5dca2b2b3f..40085afd9160 100644 --- a/include/net/xdp_sock_drv.h +++ b/include/net/xdp_sock_drv.h @@ -126,8 +126,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp) if (likely(!xdp_buff_has_frags(xdp))) goto out; - list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) { - list_del(&pos->xskb_list_node); + list_for_each_entry_safe(pos, tmp, xskb_list, list_node) { + list_del(&pos->list_node); xp_free(pos); } @@ -140,7 +140,7 @@ static inline void xsk_buff_add_frag(struct xdp_buff *xdp) { struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp); - list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list); + list_add_tail(&frag->list_node, &frag->pool->xskb_list); } static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) @@ -150,9 +150,9 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first) struct xdp_buff_xsk *frag; frag = list_first_entry_or_null(&xskb->pool->xskb_list, - struct xdp_buff_xsk, xskb_list_node); + struct xdp_buff_xsk, list_node); if (frag) { - list_del(&frag->xskb_list_node); + list_del(&frag->list_node); ret = &frag->xdp; } @@ -163,7 +163,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail) { struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp); - list_del(&xskb->xskb_list_node); + list_del(&xskb->list_node); } static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) @@ -172,7 +172,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first) struct xdp_buff_xsk *frag; frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk, - xskb_list_node); + list_node); return &frag->xdp; } diff --git a/include/net/xfrm.h b/include/net/xfrm.h index a0bdd58f401c..32c09e85a64c 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -19,6 +19,7 @@ #include <net/sock.h> #include <net/dst.h> +#include <net/inet_dscp.h> #include <net/ip.h> #include <net/route.h> #include <net/ipv6.h> @@ -184,10 +185,13 @@ struct xfrm_state { }; struct hlist_node byspi; struct hlist_node byseq; + struct hlist_node state_cache; + struct hlist_node state_cache_input; refcount_t refcnt; spinlock_t lock; + u32 pcpu_num; struct xfrm_id id; struct xfrm_selector sel; struct xfrm_mark mark; @@ -351,7 +355,7 @@ void xfrm_if_unregister_cb(void); struct xfrm_dst_lookup_params { struct net *net; - int tos; + dscp_t dscp; int oif; xfrm_address_t *saddr; xfrm_address_t *daddr; @@ -536,6 +540,7 @@ struct xfrm_policy_queue { * @xp_net: 
network namespace the policy lives in * @bydst: hlist node for SPD hash table or rbtree list * @byidx: hlist node for index hash table + * @state_cache_list: hlist head for policy cached xfrm states * @lock: serialize changes to policy structure members * @refcnt: reference count, freed once it reaches 0 * @pos: kernel internal tie-breaker to determine age of policy @@ -566,6 +571,8 @@ struct xfrm_policy { struct hlist_node bydst; struct hlist_node byidx; + struct hlist_head state_cache_list; + /* This lock only affects elements except for entry. */ rwlock_t lock; refcount_t refcnt; @@ -1645,6 +1652,10 @@ int xfrm_state_update(struct xfrm_state *x); struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark, const xfrm_address_t *daddr, __be32 spi, u8 proto, unsigned short family); +struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark, + const xfrm_address_t *daddr, + __be32 spi, u8 proto, + unsigned short family); struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark, const xfrm_address_t *daddr, const xfrm_address_t *saddr, @@ -1684,7 +1695,7 @@ struct xfrmk_spdinfo { u32 spdhmcnt; }; -struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); +struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num); int xfrm_state_delete(struct xfrm_state *x); int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync); int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); @@ -1796,7 +1807,7 @@ int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack); int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi, struct netlink_ext_ack *extack); struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, - u8 mode, u32 reqid, u32 if_id, u8 proto, + u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto, const xfrm_address_t *daddr, const xfrm_address_t *saddr, int create, unsigned short family); diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h index bacb33f1e3e5..bb03cee716b3 100644 --- a/include/net/xsk_buff_pool.h +++ b/include/net/xsk_buff_pool.h @@ -28,9 +28,7 @@ struct xdp_buff_xsk { dma_addr_t dma; dma_addr_t frame_dma; struct xsk_buff_pool *pool; - u64 orig_addr; - struct list_head free_list_node; - struct list_head xskb_list_node; + struct list_head list_node; }; #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb)) @@ -78,6 +76,7 @@ struct xsk_buff_pool { u32 chunk_size; u32 chunk_shift; u32 frame_len; + u32 xdp_zc_max_segs; u8 tx_metadata_len; /* inherited from umem */ u8 cached_need_wakeup; bool uses_need_wakeup; @@ -120,7 +119,6 @@ void xp_free(struct xdp_buff_xsk *xskb); static inline void xp_init_xskb_addr(struct xdp_buff_xsk *xskb, struct xsk_buff_pool *pool, u64 addr) { - xskb->orig_addr = addr; xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom; } @@ -222,14 +220,19 @@ static inline void xp_release(struct xdp_buff_xsk *xskb) xskb->pool->free_heads[xskb->pool->free_heads_cnt++] = xskb; } -static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb) +static inline u64 xp_get_handle(struct xdp_buff_xsk *xskb, + struct xsk_buff_pool *pool) { - u64 offset = xskb->xdp.data - xskb->xdp.data_hard_start; + u64 orig_addr = xskb->xdp.data - pool->addrs; + u64 offset; - offset += xskb->pool->headroom; - if (!xskb->pool->unaligned) - return xskb->orig_addr + offset; - return xskb->orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); + if 
(!pool->unaligned) + return orig_addr; + + offset = xskb->xdp.data - xskb->xdp.data_hard_start; + orig_addr -= offset; + offset += pool->headroom; + return orig_addr + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); } static inline bool xp_tx_metadata_enabled(const struct xsk_buff_pool *pool) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index aa8ede439905..3417636da960 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -2675,6 +2675,12 @@ struct ib_device_ops { */ void (*del_sub_dev)(struct ib_device *sub_dev); + /** + * ufile_hw_cleanup - Attempt to clean up uobjects' HW resources inside + * the ufile. + */ + void (*ufile_hw_cleanup)(struct ib_uverbs_file *ufile); + DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_counters); DECLARE_RDMA_OBJ_SIZE(ib_cq); @@ -2948,6 +2954,14 @@ int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext, size_t length, u32 min_pgoff, u32 max_pgoff); +#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) +void rdma_user_mmap_disassociate(struct ib_device *device); +#else +static inline void rdma_user_mmap_disassociate(struct ib_device *device) +{ +} +#endif + static inline int rdma_user_mmap_entry_insert_exact(struct ib_ucontext *ucontext, struct rdma_user_mmap_entry *entry, @@ -4726,6 +4740,9 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector) * @device: the rdma device */ void rdma_roce_rescan_device(struct ib_device *ibdev); +void rdma_roce_rescan_port(struct ib_device *ib_dev, u32 port); +void roce_del_all_netdev_gids(struct ib_device *ib_dev, + u32 port, struct net_device *ndev); struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile); diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h index ccd11631c167..26ba919ac245 100644 --- a/include/rdma/uverbs_types.h +++ b/include/rdma/uverbs_types.h @@ -134,6 +134,8 @@ static inline void uverbs_uobject_get(struct ib_uobject *uobject) } void uverbs_uobject_put(struct ib_uobject *uobject); +int uverbs_try_lock_object(struct ib_uobject *uobj, enum rdma_lookup_mode mode); + struct uverbs_obj_fd_type { /* * In fd based objects, uverbs_obj_type_ops points to generic * fd based functions int flags; }; +struct ib_uverbs_file { + struct kref ref; + struct ib_uverbs_device *device; + struct mutex ucontext_lock; + /* + * ucontext must be accessed via ib_uverbs_get_ucontext() or with + * ucontext_lock held + */ + struct ib_ucontext *ucontext; + struct ib_uverbs_async_event_file *default_async_file; + struct list_head list; + + /* + * To access the uobjects list hw_destroy_rwsem must be held for write + * OR hw_destroy_rwsem held for read AND uobjects_lock held. + * hw_destroy_rwsem should be held across any destruction of the HW + * object of an associated uobject.
+ */ + struct rw_semaphore hw_destroy_rwsem; + spinlock_t uobjects_lock; + struct list_head uobjects; + + struct mutex umap_lock; + struct list_head umaps; + struct page *disassociate_page; + + struct xarray idr; + + struct mutex disassociation_lock; +}; + extern const struct uverbs_obj_type_class uverbs_idr_class; extern const struct uverbs_obj_type_class uverbs_fd_class; int uverbs_uobject_fd_release(struct inode *inode, struct file *filp); diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h index 3c5899290aed..6616348e59b9 100644 --- a/include/scsi/libfcoe.h +++ b/include/scsi/libfcoe.h @@ -15,7 +15,7 @@ #include <linux/skbuff.h> #include <linux/workqueue.h> #include <linux/local_lock.h> -#include <linux/random.h> +#include <linux/prandom.h> #include <scsi/fc/fc_fcoe.h> #include <scsi/libfc.h> #include <scsi/fcoe_sysfs.h> diff --git a/include/soc/amlogic/reset-meson-aux.h b/include/soc/amlogic/reset-meson-aux.h new file mode 100644 index 000000000000..d8a15d48c984 --- /dev/null +++ b/include/soc/amlogic/reset-meson-aux.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SOC_RESET_MESON_AUX_H +#define __SOC_RESET_MESON_AUX_H + +#include <linux/err.h> + +struct device; +struct regmap; + +#if IS_ENABLED(CONFIG_RESET_MESON_AUX) +int devm_meson_rst_aux_register(struct device *dev, + struct regmap *map, + const char *adev_name); +#else +static inline int devm_meson_rst_aux_register(struct device *dev, + struct regmap *map, + const char *adev_name) +{ + return 0; +} +#endif + +#endif /* __SOC_RESET_MESON_AUX_H */ diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h index 0d3d6beb7fdb..7f7a4932d7f1 100644 --- a/include/soc/fsl/qman.h +++ b/include/soc/fsl/qman.h @@ -242,7 +242,7 @@ static inline void qm_sg_entry_set_f(struct qm_sg_entry *sg, int len) static inline int qm_sg_entry_get_off(const struct qm_sg_entry *sg) { - return be32_to_cpu(sg->offset) & QM_SG_OFF_MASK; + return be16_to_cpu(sg->offset) & QM_SG_OFF_MASK; } /* "Frame Dequeue Response" */ diff --git a/include/soc/nuvoton/clock-npcm8xx.h b/include/soc/nuvoton/clock-npcm8xx.h new file mode 100644 index 000000000000..1d974e89d8a8 --- /dev/null +++ b/include/soc/nuvoton/clock-npcm8xx.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __SOC_NPCM8XX_CLOCK_H +#define __SOC_NPCM8XX_CLOCK_H + +#include <linux/auxiliary_bus.h> +#include <linux/container_of.h> + +struct npcm_clock_adev { + void __iomem *base; + struct auxiliary_device adev; +}; + +static inline struct npcm_clock_adev *to_npcm_clock_adev(struct auxiliary_device *_adev) +{ + return container_of(_adev, struct npcm_clock_adev, adev); +} + +#endif diff --git a/include/sound/adau1373.h b/include/sound/adau1373.h deleted file mode 100644 index 4c32ba1328ed..000000000000 --- a/include/sound/adau1373.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Analog Devices ADAU1373 Audio Codec drive - * - * Copyright 2011 Analog Devices Inc. 
- * Author: Lars-Peter Clausen <lars@metafoo.de> - */ - -#ifndef __SOUND_ADAU1373_H__ -#define __SOUND_ADAU1373_H__ - -enum adau1373_micbias_voltage { - ADAU1373_MICBIAS_2_9V = 0, - ADAU1373_MICBIAS_2_2V = 1, - ADAU1373_MICBIAS_2_6V = 2, - ADAU1373_MICBIAS_1_8V = 3, -}; - -#define ADAU1373_DRC_SIZE 13 - -struct adau1373_platform_data { - bool input_differential[4]; - bool lineout_differential; - bool lineout_ground_sense; - - unsigned int num_drc; - uint8_t drc_setting[3][ADAU1373_DRC_SIZE]; - - enum adau1373_micbias_voltage micbias1; - enum adau1373_micbias_voltage micbias2; -}; - -#endif diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index bcf872c17dd3..b55c9eeb2b54 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h @@ -20,6 +20,30 @@ struct snd_compr_ops; /** + * struct snd_compr_task_runtime: task runtime description + * @list: list of all managed tasks + * @input: input DMA buffer + * @output: output DMA buffer + * @seqno: sequence number + * @input_size: amount of data actually used in the input buffer + * @output_size: amount of data actually used in the output buffer + * @flags: see SND_COMPRESS_TFLG_* + * @state: current task state + * @private_value: used by the low-level driver (opaque) + */ +struct snd_compr_task_runtime { + struct list_head list; + struct dma_buf *input; + struct dma_buf *output; + u64 seqno; + u64 input_size; + u64 output_size; + u32 flags; + u8 state; + void *private_value; +}; + +/** * struct snd_compr_runtime: runtime stream description * @state: stream state * @ops: pointer to DSP callbacks @@ -37,6 +61,10 @@ struct snd_compr_ops; * @dma_addr: physical buffer address (not accessible from main CPU) * @dma_bytes: size of DMA area * @dma_buffer_p: runtime dma buffer pointer + * @active_tasks: count of active tasks + * @total_tasks: count of all tasks + * @task_seqno: last task sequence number (!= 0) + * @tasks: list of all tasks */ struct snd_compr_runtime { snd_pcm_state_t state; @@ -54,6 +82,13 @@ struct snd_compr_runtime { dma_addr_t dma_addr; size_t dma_bytes; struct snd_dma_buffer *dma_buffer_p; + +#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL) + u32 active_tasks; + u32 total_tasks; + u64 task_seqno; + struct list_head tasks; +#endif }; /** @@ -108,6 +143,10 @@ struct snd_compr_stream { * Not valid if copy is implemented * @get_caps: Retrieve DSP capabilities, mandatory * @get_codec_caps: Retrieve capabilities for a specific codec, mandatory + * @task_create: Create a set of input/output buffers for accel operations + * @task_start: Start (queue) a task for accel operations + * @task_stop: Stop (dequeue) a task for accel operations + * @task_free: Free a set of input/output buffers for accel operations */ struct snd_compr_ops { int (*open)(struct snd_compr_stream *stream); @@ -132,6 +171,12 @@ struct snd_compr_ops { struct snd_compr_caps *caps); int (*get_codec_caps) (struct snd_compr_stream *stream, struct snd_compr_codec_caps *codec); +#if IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL) + int (*task_create) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task); + int (*task_start) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task); + int (*task_stop) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task); + int (*task_free) (struct snd_compr_stream *stream, struct snd_compr_task_runtime *task); +#endif }; /** @@ -242,4 +287,9 @@ int snd_compr_free_pages(struct snd_compr_stream *stream); int snd_compr_stop_error(struct snd_compr_stream *stream, snd_pcm_state_t state); +#if
IS_ENABLED(CONFIG_SND_COMPRESS_ACCEL) +void snd_compr_task_finished(struct snd_compr_stream *stream, + struct snd_compr_task_runtime *task); +#endif + #endif diff --git a/include/sound/hda-mlink.h b/include/sound/hda-mlink.h index 9ced94686ce3..6774f4b9e5fc 100644 --- a/include/sound/hda-mlink.h +++ b/include/sound/hda-mlink.h @@ -15,6 +15,7 @@ int hda_bus_ml_init(struct hdac_bus *bus); void hda_bus_ml_free(struct hdac_bus *bus); int hdac_bus_eml_get_count(struct hdac_bus *bus, bool alt, int elid); +void hdac_bus_eml_enable_interrupt_unlocked(struct hdac_bus *bus, bool alt, int elid, bool enable); void hdac_bus_eml_enable_interrupt(struct hdac_bus *bus, bool alt, int elid, bool enable); bool hdac_bus_eml_check_interrupt(struct hdac_bus *bus, bool alt, int elid); @@ -72,6 +73,9 @@ static inline int hdac_bus_eml_get_count(struct hdac_bus *bus, bool alt, int elid) { return 0; } static inline void +hdac_bus_eml_enable_interrupt_unlocked(struct hdac_bus *bus, bool alt, int elid, bool enable) { } + +static inline void hdac_bus_eml_enable_interrupt(struct hdac_bus *bus, bool alt, int elid, bool enable) { } static inline bool diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h index 5ff31e6d41c1..db1cc0b897fd 100644 --- a/include/sound/hda_register.h +++ b/include/sound/hda_register.h @@ -180,7 +180,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 }; #define SD_STS_FIFO_READY 0x20 /* FIFO ready */ /* INTCTL and INTSTS */ -#define AZX_INT_ALL_STREAM 0xff /* all stream interrupts */ +#define AZX_INT_ALL_STREAM 0x3fffffff /* all stream interrupts */ #define AZX_INT_CTRL_EN 0x40000000 /* controller interrupt enable bit */ #define AZX_INT_GLOBAL_EN 0x80000000 /* global interrupt enable bit */ diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 0bf7d25434d7..67c99ffbf51b 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -97,11 +97,11 @@ struct snd_pcm_ops { #define SNDRV_PCM_TRIGGER_STOP 0 #define SNDRV_PCM_TRIGGER_START 1 -#define SNDRV_PCM_TRIGGER_PAUSE_PUSH 3 -#define SNDRV_PCM_TRIGGER_PAUSE_RELEASE 4 -#define SNDRV_PCM_TRIGGER_SUSPEND 5 -#define SNDRV_PCM_TRIGGER_RESUME 6 -#define SNDRV_PCM_TRIGGER_DRAIN 7 +#define SNDRV_PCM_TRIGGER_PAUSE_PUSH 2 +#define SNDRV_PCM_TRIGGER_PAUSE_RELEASE 3 +#define SNDRV_PCM_TRIGGER_SUSPEND 4 +#define SNDRV_PCM_TRIGGER_RESUME 5 +#define SNDRV_PCM_TRIGGER_DRAIN 6 #define SNDRV_PCM_POS_XRUN ((snd_pcm_uframes_t)-1) @@ -1393,30 +1393,6 @@ snd_pcm_sgbuf_get_chunk_size(struct snd_pcm_substream *substream, return snd_sgbuf_get_chunk_size(snd_pcm_get_dma_buf(substream), ofs, size); } -/** - * snd_pcm_mmap_data_open - increase the mmap counter - * @area: VMA - * - * PCM mmap callback should handle this counter properly - */ -static inline void snd_pcm_mmap_data_open(struct vm_area_struct *area) -{ - struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data; - atomic_inc(&substream->mmap_count); -} - -/** - * snd_pcm_mmap_data_close - decrease the mmap counter - * @area: VMA - * - * PCM mmap callback should handle this counter properly - */ -static inline void snd_pcm_mmap_data_close(struct vm_area_struct *area) -{ - struct snd_pcm_substream *substream = (struct snd_pcm_substream *)area->vm_private_data; - atomic_dec(&substream->mmap_count); -} - int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream, struct vm_area_struct *area); /* mmap for io-memory area */ diff --git a/include/sound/sdca.h b/include/sound/sdca.h new file mode 100644 index 000000000000..7e138229e8f3 --- /dev/null +++ 
b/include/sound/sdca.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* + * The MIPI SDCA specification is available for public downloads at + * https://www.mipi.org/mipi-sdca-v1-0-download + * + * Copyright(c) 2024 Intel Corporation + */ + +#ifndef __SDCA_H__ +#define __SDCA_H__ + +struct sdw_slave; + +#define SDCA_MAX_FUNCTION_COUNT 8 + +/** + * sdca_function_desc - short descriptor for an SDCA Function + * @adr: ACPI address (used for SDCA register access) + * @type: Function topology type + * @name: human-readable string + */ +struct sdca_function_desc { + u64 adr; + u32 type; + const char *name; +}; + +/** + * sdca_device_data - structure containing all SDCA-related information + * @interface_revision: value read from _DSD property, mainly to check + * for changes between silicon versions + * @num_functions: total number of supported SDCA functions. Invalid/unsupported + * functions will be skipped. + * @sdca_func: array of function descriptors + */ +struct sdca_device_data { + u32 interface_revision; + int num_functions; + struct sdca_function_desc sdca_func[SDCA_MAX_FUNCTION_COUNT]; +}; + +enum sdca_quirk { + SDCA_QUIRKS_RT712_VB, +}; + +#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_SOC_SDCA) + +void sdca_lookup_functions(struct sdw_slave *slave); +void sdca_lookup_interface_revision(struct sdw_slave *slave); +bool sdca_device_quirk_match(struct sdw_slave *slave, enum sdca_quirk quirk); + +#else + +static inline void sdca_lookup_functions(struct sdw_slave *slave) {} +static inline void sdca_lookup_interface_revision(struct sdw_slave *slave) {} +static inline bool sdca_device_quirk_match(struct sdw_slave *slave, enum sdca_quirk quirk) +{ + return false; +} +#endif + +#endif diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h new file mode 100644 index 000000000000..a01eec86b9a6 --- /dev/null +++ b/include/sound/sdca_function.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ +/* + * The MIPI SDCA specification is available for public downloads at + * https://www.mipi.org/mipi-sdca-v1-0-download + * + * Copyright(c) 2024 Intel Corporation + */ + +#ifndef __SDCA_FUNCTION_H__ +#define __SDCA_FUNCTION_H__ + +/* + * SDCA Function Types from SDCA specification v1.0a Section 5.1.2 + * all Function types not described are reserved + * Note that SIMPLE_AMP, SIMPLE_MIC and SIMPLE_JACK Function Types + * are NOT defined in SDCA 1.0a, but they were defined in earlier + * drafts and are planned for 1.1. + */ + +enum sdca_function_type { + SDCA_FUNCTION_TYPE_SMART_AMP = 0x01, /* Amplifier with protection features */ + SDCA_FUNCTION_TYPE_SIMPLE_AMP = 0x02, /* subset of SmartAmp */ + SDCA_FUNCTION_TYPE_SMART_MIC = 0x03, /* Smart microphone with acoustic triggers */ + SDCA_FUNCTION_TYPE_SIMPLE_MIC = 0x04, /* subset of SmartMic */ + SDCA_FUNCTION_TYPE_SPEAKER_MIC = 0x05, /* Combination of SmartMic and SmartAmp */ + SDCA_FUNCTION_TYPE_UAJ = 0x06, /* 3.5mm Universal Audio jack */ + SDCA_FUNCTION_TYPE_RJ = 0x07, /* Retaskable jack */ + SDCA_FUNCTION_TYPE_SIMPLE_JACK = 0x08, /* Subset of UAJ */ + SDCA_FUNCTION_TYPE_HID = 0x0A, /* Human Interface Device, e.g.
buttons */ + SDCA_FUNCTION_TYPE_IMP_DEF = 0x1F, /* Implementation-defined function */ +}; + +/* Human-readable names used for kernel logs and Function device registration/bind */ +#define SDCA_FUNCTION_TYPE_SMART_AMP_NAME "SmartAmp" +#define SDCA_FUNCTION_TYPE_SIMPLE_AMP_NAME "SimpleAmp" +#define SDCA_FUNCTION_TYPE_SMART_MIC_NAME "SmartMic" +#define SDCA_FUNCTION_TYPE_SIMPLE_MIC_NAME "SimpleMic" +#define SDCA_FUNCTION_TYPE_SPEAKER_MIC_NAME "SpeakerMic" +#define SDCA_FUNCTION_TYPE_UAJ_NAME "UAJ" +#define SDCA_FUNCTION_TYPE_RJ_NAME "RJ" +#define SDCA_FUNCTION_TYPE_SIMPLE_NAME "SimpleJack" +#define SDCA_FUNCTION_TYPE_HID_NAME "HID" + +enum sdca_entity0_controls { + SDCA_CONTROL_ENTITY_0_COMMIT_GROUP_MASK = 0x01, + SDCA_CONTROL_ENTITY_0_INTSTAT_CLEAR = 0x02, + SDCA_CONTROL_ENTITY_0_INT_ENABLE = 0x03, + SDCA_CONTROL_ENTITY_0_FUNCTION_SDCA_VERSION = 0x04, + SDCA_CONTROL_ENTITY_0_FUNCTION_TOPOLOGY = 0x05, + SDCA_CONTROL_ENTITY_0_FUNCTION_MANUFACTURER_ID = 0x06, + SDCA_CONTROL_ENTITY_0_FUNCTION_ID = 0x07, + SDCA_CONTROL_ENTITY_0_FUNCTION_VERSION = 0x08 +}; + +#endif diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h index 60d3b86a4660..72e371a21767 100644 --- a/include/sound/soc-acpi.h +++ b/include/sound/soc-acpi.h @@ -185,6 +185,10 @@ struct snd_soc_acpi_link_adr { * ACPI ID alone is not sufficient, wrong or misleading * @quirk_data: data used to uniquely identify a machine, usually a list of * audio codecs whose presence is checked with ACPI + * @machine_check: pointer to a quirk function. The functionality is similar to + * the use of @machine_quirk, except that the return value is a boolean: the intent + * is to skip a machine if the additional hardware/firmware verification invalidates + * the initial selection in the snd_soc_acpi_mach table. * @pdata: intended for platform data or machine-specific ops.
This structure * is not constant since this field may be updated at run-time * @sof_tplg_filename: Sound Open Firmware topology file name, if enabled @@ -203,6 +207,7 @@ struct snd_soc_acpi_mach { const char *board; struct snd_soc_acpi_mach * (*machine_quirk)(void *arg); const void *quirk_data; + bool (*machine_check)(void *arg); void *pdata; struct snd_soc_acpi_mach_params mach_params; const char *sof_tplg_filename; @@ -233,7 +238,6 @@ static inline bool snd_soc_acpi_sof_parent(struct device *dev) bool snd_soc_acpi_sdw_link_slaves_found(struct device *dev, const struct snd_soc_acpi_link_adr *link, - struct sdw_extended_slave_id *ids, - int num_slaves); + struct sdw_peripherals *peripherals); #endif diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h index 0d1b215f24f4..aab57c19f62b 100644 --- a/include/sound/soc-dai.h +++ b/include/sound/soc-dai.h @@ -216,8 +216,7 @@ void snd_soc_dai_shutdown(struct snd_soc_dai *dai, struct snd_pcm_substream *substream, int rollback); void snd_soc_dai_suspend(struct snd_soc_dai *dai); void snd_soc_dai_resume(struct snd_soc_dai *dai); -int snd_soc_dai_compress_new(struct snd_soc_dai *dai, - struct snd_soc_pcm_runtime *rtd, int num); +int snd_soc_dai_compress_new(struct snd_soc_dai *dai, struct snd_soc_pcm_runtime *rtd); bool snd_soc_dai_stream_valid(const struct snd_soc_dai *dai, int stream); void snd_soc_dai_action(struct snd_soc_dai *dai, int stream, int action); @@ -275,7 +274,7 @@ struct snd_soc_dai_ops { int (*probe)(struct snd_soc_dai *dai); int (*remove)(struct snd_soc_dai *dai); /* compress dai */ - int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num); + int (*compress_new)(struct snd_soc_pcm_runtime *rtd); /* Optional Callback used at pcm creation*/ int (*pcm_new)(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); @@ -450,9 +449,9 @@ struct snd_soc_dai { struct snd_soc_dai_stream stream[SNDRV_PCM_STREAM_LAST + 1]; /* Symmetry data - only valid if symmetry is being enforced */ - unsigned int rate; - unsigned int channels; - unsigned int sample_bits; + unsigned int symmetric_rate; + unsigned int symmetric_channels; + unsigned int symmetric_sample_bits; /* parent platform/codec */ struct snd_soc_component *component; diff --git a/include/sound/soc.h b/include/sound/soc.h index e6e359c1a2ac..4f5d411e3823 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h @@ -486,11 +486,11 @@ struct snd_soc_component *snd_soc_lookup_component_nolocked(struct device *dev, struct snd_soc_component *snd_soc_lookup_component(struct device *dev, const char *driver_name); -int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num); +int soc_new_pcm(struct snd_soc_pcm_runtime *rtd); #ifdef CONFIG_SND_SOC_COMPRESS -int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num); +int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd); #else -static inline int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num) +static inline int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd) { return 0; } @@ -541,8 +541,13 @@ int snd_soc_tdm_params_to_bclk(const struct snd_pcm_hw_params *params, int tdm_width, int tdm_slots, int slot_multiple); /* set runtime hw params */ -int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream, - const struct snd_pcm_hardware *hw); +static inline int snd_soc_set_runtime_hwparams(struct snd_pcm_substream *substream, + const struct snd_pcm_hardware *hw) +{ + substream->runtime->hw = *hw; + + return 0; +} struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component); 
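A minimal usage sketch for the snd_soc_set_runtime_hwparams() inline shown above, assuming a hypothetical component driver; the foo_* names and the constraint values are illustrative only, not part of this patch:

static const struct snd_pcm_hardware foo_pcm_hardware = {
	.info             = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP,
	.formats          = SNDRV_PCM_FMTBIT_S16_LE,
	.rate_min         = 8000,
	.rate_max         = 48000,
	.channels_min     = 1,
	.channels_max     = 2,
	.buffer_bytes_max = 64 * 1024,
	.period_bytes_min = 64,
	.period_bytes_max = 16 * 1024,
	.periods_min      = 2,
	.periods_max      = 255,
};

static int foo_pcm_open(struct snd_soc_component *component,
			struct snd_pcm_substream *substream)
{
	/* Copies the constraint table into substream->runtime->hw; with the
	 * static inline above this no longer goes through an exported
	 * out-of-line function.
	 */
	return snd_soc_set_runtime_hwparams(substream, &foo_pcm_hardware);
}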
struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component, @@ -815,11 +820,6 @@ struct snd_soc_dai_link { /* This DAI link can route to other DAI links at runtime (Frontend)*/ unsigned int dynamic:1; - /* REMOVE ME */ - /* DPCM capture and Playback support */ - unsigned int dpcm_capture:1; - unsigned int dpcm_playback:1; - /* DPCM used FE & BE merged format */ unsigned int dpcm_merged_format:1; /* DPCM used FE & BE merged channel */ @@ -1195,7 +1195,7 @@ struct snd_soc_pcm_runtime { struct dentry *debugfs_dpcm_root; #endif - unsigned int num; /* 0-based and monotonic increasing */ + unsigned int id; /* 0-based and monotonic increasing */ struct list_head list; /* rtd list of the soc card */ /* function mark */ @@ -1438,10 +1438,6 @@ struct snd_soc_dai *snd_soc_get_dai_via_args(const struct of_phandle_args *dai_a struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component, struct snd_soc_dai_driver *dai_drv, bool legacy_dai_naming); -struct snd_soc_dai *devm_snd_soc_register_dai(struct device *dev, - struct snd_soc_component *component, - struct snd_soc_dai_driver *dai_drv, - bool legacy_dai_naming); void snd_soc_unregister_dai(struct snd_soc_dai *dai); struct snd_soc_dai *snd_soc_find_dai( diff --git a/include/sound/soc_sdw_utils.h b/include/sound/soc_sdw_utils.h index f68c1f193b3b..0e82598e10af 100644 --- a/include/sound/soc_sdw_utils.h +++ b/include/sound/soc_sdw_utils.h @@ -28,6 +28,7 @@ * - SOC_SDW_CODEC_SPKR | SOF_SIDECAR_AMPS - Not currently supported */ #define SOC_SDW_SIDECAR_AMPS BIT(16) +#define SOC_SDW_CODEC_MIC BIT(17) #define SOC_SDW_UNUSED_DAI_ID -1 #define SOC_SDW_JACK_OUT_DAI_ID 0 @@ -59,6 +60,7 @@ struct asoc_sdw_dai_info { int (*rtd_init)(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); bool rtd_init_done; /* Indicate that the rtd_init callback is done */ unsigned long quirk; + bool quirk_exclude; }; struct asoc_sdw_codec_info { @@ -150,14 +152,15 @@ void asoc_sdw_init_dai_link(struct device *dev, struct snd_soc_dai_link *dai_lin struct snd_soc_dai_link_component *cpus, int cpus_num, struct snd_soc_dai_link_component *platform_component, int num_platforms, struct snd_soc_dai_link_component *codecs, - int codecs_num, int (*init)(struct snd_soc_pcm_runtime *rtd), + int codecs_num, int no_pcm, + int (*init)(struct snd_soc_pcm_runtime *rtd), const struct snd_soc_ops *ops); int asoc_sdw_init_simple_dai_link(struct device *dev, struct snd_soc_dai_link *dai_links, int *be_id, char *name, int playback, int capture, const char *cpu_dai_name, const char *platform_comp_name, int num_platforms, const char *codec_name, - const char *codec_dai_name, + const char *codec_dai_name, int no_pcm, int (*init)(struct snd_soc_pcm_runtime *rtd), const struct snd_soc_ops *ops); @@ -234,8 +237,7 @@ int asoc_sdw_rt_sdca_jack_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_s int asoc_sdw_rt_amp_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); int asoc_sdw_rt700_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); int asoc_sdw_rt711_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); -int asoc_sdw_rt712_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); -int asoc_sdw_rt722_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); +int asoc_sdw_rt_mf_sdca_spk_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); int asoc_sdw_rt5682_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); int asoc_sdw_cs42l42_rtd_init(struct 
snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); int asoc_sdw_cs42l43_hs_rtd_init(struct snd_soc_pcm_runtime *rtd, struct snd_soc_dai *dai); diff --git a/include/sound/sof/ext_manifest.h b/include/sound/sof/ext_manifest.h index fc0231d04a94..7dfe3ccf1fe4 100644 --- a/include/sound/sof/ext_manifest.h +++ b/include/sound/sof/ext_manifest.h @@ -60,6 +60,7 @@ enum sof_ext_man_elem_type { SOF_EXT_MAN_ELEM_FW_VERSION = 0, SOF_EXT_MAN_ELEM_WINDOW = 1, SOF_EXT_MAN_ELEM_CC_VERSION = 2, + SOF_EXT_MAN_ELEM_PROBE_INFO = 3, SOF_EXT_MAN_ELEM_DBG_ABI = 4, SOF_EXT_MAN_ELEM_CONFIG_DATA = 5, /**< ABI3.17 */ SOF_EXT_MAN_ELEM_PLATFORM_CONFIG_DATA = 6, diff --git a/include/sound/tas2781.h b/include/sound/tas2781.h index 8cd6da0480b7..72d2060904f6 100644 --- a/include/sound/tas2781.h +++ b/include/sound/tas2781.h @@ -156,6 +156,7 @@ struct tasdevice_priv { struct tasdevice_rca rcabin; struct calidata cali_data; struct tasdevice_fw *fmw; + struct gpio_desc *speaker_id; struct gpio_desc *reset; struct mutex codec_lock; struct regmap *regmap; diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h index a2ea11cc912e..183fa2aa2935 100644 --- a/include/trace/bpf_probe.h +++ b/include/trace/bpf_probe.h @@ -53,6 +53,20 @@ __bpf_trace_##call(void *__data, proto) \ #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ __BPF_DECLARE_TRACE(call, PARAMS(proto), PARAMS(args)) +#define __BPF_DECLARE_TRACE_SYSCALL(call, proto, args) \ +static notrace void \ +__bpf_trace_##call(void *__data, proto) \ +{ \ + might_fault(); \ + preempt_disable_notrace(); \ + CONCATENATE(bpf_trace_run, COUNT_ARGS(args))(__data, CAST_TO_U64(args)); \ + preempt_enable_notrace(); \ +} + +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \ + __BPF_DECLARE_TRACE_SYSCALL(call, PARAMS(proto), PARAMS(args)) + /* * This part is compiled out, it is only here as a build time check * to make sure that if the tracepoint handling changes, the diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h index 00723935dcc7..e1c1079f8c8d 100644 --- a/include/trace/define_trace.h +++ b/include/trace/define_trace.h @@ -46,6 +46,10 @@ assign, print, reg, unreg) \ DEFINE_TRACE_FN(name, reg, unreg, PARAMS(proto), PARAMS(args)) +#undef TRACE_EVENT_SYSCALL +#define TRACE_EVENT_SYSCALL(name, proto, args, struct, assign, print, reg, unreg) \ + DEFINE_TRACE_SYSCALL(name, reg, unreg, PARAMS(proto), PARAMS(args)) + #undef TRACE_EVENT_NOP #define TRACE_EVENT_NOP(name, proto, args, struct, assign, print) @@ -72,6 +76,13 @@ #define DECLARE_TRACE(name, proto, args) \ DEFINE_TRACE(name, PARAMS(proto), PARAMS(args)) +/* If requested, create helpers for calling these tracepoints from Rust. 
*/ +#ifdef CREATE_RUST_TRACE_POINTS +#undef DEFINE_RUST_DO_TRACE +#define DEFINE_RUST_DO_TRACE(name, proto, args) \ + __DEFINE_RUST_DO_TRACE(name, PARAMS(proto), PARAMS(args)) +#endif + #undef TRACE_INCLUDE #undef __TRACE_INCLUDE @@ -107,6 +118,7 @@ #undef TRACE_EVENT #undef TRACE_EVENT_FN #undef TRACE_EVENT_FN_COND +#undef TRACE_EVENT_SYSCALL #undef TRACE_EVENT_CONDITION #undef TRACE_EVENT_NOP #undef DEFINE_EVENT_NOP @@ -129,6 +141,11 @@ # undef UNDEF_TRACE_INCLUDE_PATH #endif +#ifdef CREATE_RUST_TRACE_POINTS +# undef DEFINE_RUST_DO_TRACE +# define DEFINE_RUST_DO_TRACE(name, proto, args) +#endif + /* We may be processing more files */ #define CREATE_TRACE_POINTS diff --git a/include/trace/events/block.h b/include/trace/events/block.h index 1527d5d45e01..bd0ea07338eb 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h @@ -99,7 +99,7 @@ TRACE_EVENT(block_rq_requeue, __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0; __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); - __entry->ioprio = rq->ioprio; + __entry->ioprio = req_get_ioprio(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; @@ -136,7 +136,7 @@ DECLARE_EVENT_CLASS(block_rq_completion, __entry->sector = blk_rq_pos(rq); __entry->nr_sector = nr_bytes >> 9; __entry->error = blk_status_to_errno(error); - __entry->ioprio = rq->ioprio; + __entry->ioprio = req_get_ioprio(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; @@ -209,7 +209,7 @@ DECLARE_EVENT_CLASS(block_rq, __entry->sector = blk_rq_trace_sector(rq); __entry->nr_sector = blk_rq_trace_nr_sectors(rq); __entry->bytes = blk_rq_bytes(rq); - __entry->ioprio = rq->ioprio; + __entry->ioprio = req_get_ioprio(rq); blk_fill_rwbs(__entry->rwbs, rq->cmd_flags); __get_str(cmd)[0] = '\0'; diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index af6b3827fb1d..4df93ca9b7a8 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -1706,9 +1706,10 @@ DEFINE_EVENT(btrfs__qgroup_rsv_data, btrfs_qgroup_release_data, DECLARE_EVENT_CLASS(btrfs_qgroup_extent, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_qgroup_extent_record *rec), + const struct btrfs_qgroup_extent_record *rec, + u64 bytenr), - TP_ARGS(fs_info, rec), + TP_ARGS(fs_info, rec, bytenr), TP_STRUCT__entry_btrfs( __field( u64, bytenr ) @@ -1716,7 +1717,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent, ), TP_fast_assign_btrfs(fs_info, - __entry->bytenr = rec->bytenr; + __entry->bytenr = bytenr; __entry->num_bytes = rec->num_bytes; ), @@ -1727,17 +1728,19 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent, DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_qgroup_extent_record *rec), + const struct btrfs_qgroup_extent_record *rec, + u64 bytenr), - TP_ARGS(fs_info, rec) + TP_ARGS(fs_info, rec, bytenr) ); DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_trace_extent, TP_PROTO(const struct btrfs_fs_info *fs_info, - const struct btrfs_qgroup_extent_record *rec), + const struct btrfs_qgroup_extent_record *rec, + u64 bytenr), - TP_ARGS(fs_info, rec) + TP_ARGS(fs_info, rec, bytenr) ); TRACE_EVENT(qgroup_num_dirty_extents, @@ -2341,7 +2344,6 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_unlock_blocking); DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_read); DEFINE_BTRFS_LOCK_EVENT(btrfs_set_lock_blocking_write); DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_read_lock); 
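Looking back at the btrfs_qgroup_extent event class above: the extent start is now passed in explicitly rather than read from rec->bytenr inside the event. A hedged caller sketch, where report_qgroup_extent() is a hypothetical wrapper and not a function from this patch:

static void report_qgroup_extent(const struct btrfs_fs_info *fs_info,
				 const struct btrfs_qgroup_extent_record *rec,
				 u64 bytenr)
{
	/* The tracepoint generated from DEFINE_EVENT() now takes bytenr as a
	 * third argument; the record is no longer dereferenced for it.
	 */
	trace_btrfs_qgroup_trace_extent(fs_info, rec, bytenr);
}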
-DEFINE_BTRFS_LOCK_EVENT(btrfs_try_tree_write_lock); DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic); DECLARE_EVENT_CLASS(btrfs__space_info_update, @@ -2553,10 +2555,9 @@ TRACE_EVENT(btrfs_extent_map_shrinker_count, TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter, - TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_to_scan, long nr, - u64 last_root_id, u64 last_ino), + TP_PROTO(const struct btrfs_fs_info *fs_info, long nr), - TP_ARGS(fs_info, nr_to_scan, nr, last_root_id, last_ino), + TP_ARGS(fs_info, nr), TP_STRUCT__entry_btrfs( __field( long, nr_to_scan ) @@ -2566,10 +2567,11 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter, ), TP_fast_assign_btrfs(fs_info, - __entry->nr_to_scan = nr_to_scan; + __entry->nr_to_scan = \ + atomic64_read(&fs_info->em_shrinker_nr_to_scan); __entry->nr = nr; - __entry->last_root_id = last_root_id; - __entry->last_ino = last_ino; + __entry->last_root_id = fs_info->em_shrinker_last_root; + __entry->last_ino = fs_info->em_shrinker_last_ino; ), TP_printk_btrfs("nr_to_scan=%ld nr=%ld last_root=%llu(%s) last_ino=%llu", @@ -2579,10 +2581,9 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_enter, TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit, - TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr, - u64 last_root_id, u64 last_ino), + TP_PROTO(const struct btrfs_fs_info *fs_info, long nr_dropped, long nr), - TP_ARGS(fs_info, nr_dropped, nr, last_root_id, last_ino), + TP_ARGS(fs_info, nr_dropped, nr), TP_STRUCT__entry_btrfs( __field( long, nr_dropped ) @@ -2594,8 +2595,8 @@ TRACE_EVENT(btrfs_extent_map_shrinker_scan_exit, TP_fast_assign_btrfs(fs_info, __entry->nr_dropped = nr_dropped; __entry->nr = nr; - __entry->last_root_id = last_root_id; - __entry->last_ino = last_ino; + __entry->last_root_id = fs_info->em_shrinker_last_root; + __entry->last_ino = fs_info->em_shrinker_last_ino; ), TP_printk_btrfs("nr_dropped=%ld nr=%ld last_root=%llu(%s) last_ino=%llu", diff --git a/include/trace/events/dma.h b/include/trace/events/dma.h index b0f41265191c..d8ddc27b6a7c 100644 --- a/include/trace/events/dma.h +++ b/include/trace/events/dma.h @@ -65,15 +65,14 @@ DECLARE_EVENT_CLASS(dma_map, decode_dma_attrs(__entry->attrs)) ); -DEFINE_EVENT(dma_map, dma_map_page, - TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr, - size_t size, enum dma_data_direction dir, unsigned long attrs), - TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs)); +#define DEFINE_MAP_EVENT(name) \ +DEFINE_EVENT(dma_map, name, \ + TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr, \ + size_t size, enum dma_data_direction dir, unsigned long attrs), \ + TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs)) -DEFINE_EVENT(dma_map, dma_map_resource, - TP_PROTO(struct device *dev, phys_addr_t phys_addr, dma_addr_t dma_addr, - size_t size, enum dma_data_direction dir, unsigned long attrs), - TP_ARGS(dev, phys_addr, dma_addr, size, dir, attrs)); +DEFINE_MAP_EVENT(dma_map_page); +DEFINE_MAP_EVENT(dma_map_resource); DECLARE_EVENT_CLASS(dma_unmap, TP_PROTO(struct device *dev, dma_addr_t addr, size_t size, @@ -104,20 +103,20 @@ DECLARE_EVENT_CLASS(dma_unmap, decode_dma_attrs(__entry->attrs)) ); -DEFINE_EVENT(dma_unmap, dma_unmap_page, - TP_PROTO(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir, unsigned long attrs), - TP_ARGS(dev, addr, size, dir, attrs)); +#define DEFINE_UNMAP_EVENT(name) \ +DEFINE_EVENT(dma_unmap, name, \ + TP_PROTO(struct device *dev, dma_addr_t addr, size_t size, \ + enum dma_data_direction dir, unsigned 
long attrs), \ + TP_ARGS(dev, addr, size, dir, attrs)) -DEFINE_EVENT(dma_unmap, dma_unmap_resource, - TP_PROTO(struct device *dev, dma_addr_t addr, size_t size, - enum dma_data_direction dir, unsigned long attrs), - TP_ARGS(dev, addr, size, dir, attrs)); +DEFINE_UNMAP_EVENT(dma_unmap_page); +DEFINE_UNMAP_EVENT(dma_unmap_resource); -TRACE_EVENT(dma_alloc, +DECLARE_EVENT_CLASS(dma_alloc_class, TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, - size_t size, gfp_t flags, unsigned long attrs), - TP_ARGS(dev, virt_addr, dma_addr, size, flags, attrs), + size_t size, enum dma_data_direction dir, gfp_t flags, + unsigned long attrs), + TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs), TP_STRUCT__entry( __string(device, dev_name(dev)) @@ -125,6 +124,7 @@ TRACE_EVENT(dma_alloc, __field(u64, dma_addr) __field(size_t, size) __field(gfp_t, flags) + __field(enum dma_data_direction, dir) __field(unsigned long, attrs) ), @@ -137,8 +137,9 @@ TRACE_EVENT(dma_alloc, __entry->attrs = attrs; ), - TP_printk("%s dma_addr=%llx size=%zu virt_addr=%p flags=%s attrs=%s", + TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p flags=%s attrs=%s", __get_str(device), + decode_dma_data_direction(__entry->dir), __entry->dma_addr, __entry->size, __entry->virt_addr, @@ -146,16 +147,69 @@ TRACE_EVENT(dma_alloc, decode_dma_attrs(__entry->attrs)) ); -TRACE_EVENT(dma_free, +#define DEFINE_ALLOC_EVENT(name) \ +DEFINE_EVENT(dma_alloc_class, name, \ + TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \ + size_t size, enum dma_data_direction dir, gfp_t flags, \ + unsigned long attrs), \ + TP_ARGS(dev, virt_addr, dma_addr, size, dir, flags, attrs)) + +DEFINE_ALLOC_EVENT(dma_alloc); +DEFINE_ALLOC_EVENT(dma_alloc_pages); +DEFINE_ALLOC_EVENT(dma_alloc_sgt_err); + +TRACE_EVENT(dma_alloc_sgt, + TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size, + enum dma_data_direction dir, gfp_t flags, unsigned long attrs), + TP_ARGS(dev, sgt, size, dir, flags, attrs), + + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __dynamic_array(u64, phys_addrs, sgt->orig_nents) + __field(u64, dma_addr) + __field(size_t, size) + __field(enum dma_data_direction, dir) + __field(gfp_t, flags) + __field(unsigned long, attrs) + ), + + TP_fast_assign( + struct scatterlist *sg; + int i; + + __assign_str(device); + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) + ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg); + __entry->dma_addr = sg_dma_address(sgt->sgl); + __entry->size = size; + __entry->dir = dir; + __entry->flags = flags; + __entry->attrs = attrs; + ), + + TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s flags=%s attrs=%s", + __get_str(device), + decode_dma_data_direction(__entry->dir), + __entry->dma_addr, + __entry->size, + __print_array(__get_dynamic_array(phys_addrs), + __get_dynamic_array_len(phys_addrs) / + sizeof(u64), sizeof(u64)), + show_gfp_flags(__entry->flags), + decode_dma_attrs(__entry->attrs)) +); + +DECLARE_EVENT_CLASS(dma_free_class, TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, - size_t size, unsigned long attrs), - TP_ARGS(dev, virt_addr, dma_addr, size, attrs), + size_t size, enum dma_data_direction dir, unsigned long attrs), + TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs), TP_STRUCT__entry( __string(device, dev_name(dev)) __field(void *, virt_addr) __field(u64, dma_addr) __field(size_t, size) + __field(enum dma_data_direction, dir) __field(unsigned long, attrs) ), @@ -164,17 +218,63 @@ TRACE_EVENT(dma_free, __entry->virt_addr = 
virt_addr; __entry->dma_addr = dma_addr; __entry->size = size; + __entry->dir = dir; __entry->attrs = attrs; ), - TP_printk("%s dma_addr=%llx size=%zu virt_addr=%p attrs=%s", + TP_printk("%s dir=%s dma_addr=%llx size=%zu virt_addr=%p attrs=%s", __get_str(device), + decode_dma_data_direction(__entry->dir), __entry->dma_addr, __entry->size, __entry->virt_addr, decode_dma_attrs(__entry->attrs)) ); +#define DEFINE_FREE_EVENT(name) \ +DEFINE_EVENT(dma_free_class, name, \ + TP_PROTO(struct device *dev, void *virt_addr, dma_addr_t dma_addr, \ + size_t size, enum dma_data_direction dir, unsigned long attrs), \ + TP_ARGS(dev, virt_addr, dma_addr, size, dir, attrs)) + +DEFINE_FREE_EVENT(dma_free); +DEFINE_FREE_EVENT(dma_free_pages); + +TRACE_EVENT(dma_free_sgt, + TP_PROTO(struct device *dev, struct sg_table *sgt, size_t size, + enum dma_data_direction dir), + TP_ARGS(dev, sgt, size, dir), + + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __dynamic_array(u64, phys_addrs, sgt->orig_nents) + __field(u64, dma_addr) + __field(size_t, size) + __field(enum dma_data_direction, dir) + ), + + TP_fast_assign( + struct scatterlist *sg; + int i; + + __assign_str(device); + for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) + ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg); + __entry->dma_addr = sg_dma_address(sgt->sgl); + __entry->size = size; + __entry->dir = dir; + ), + + TP_printk("%s dir=%s dma_addr=%llx size=%zu phys_addrs=%s", + __get_str(device), + decode_dma_data_direction(__entry->dir), + __entry->dma_addr, + __entry->size, + __print_array(__get_dynamic_array(phys_addrs), + __get_dynamic_array_len(phys_addrs) / + sizeof(u64), sizeof(u64))) +); + TRACE_EVENT(dma_map_sg, TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents, int ents, enum dma_data_direction dir, unsigned long attrs), @@ -221,6 +321,41 @@ TRACE_EVENT(dma_map_sg, decode_dma_attrs(__entry->attrs)) ); +TRACE_EVENT(dma_map_sg_err, + TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents, + int err, enum dma_data_direction dir, unsigned long attrs), + TP_ARGS(dev, sgl, nents, err, dir, attrs), + + TP_STRUCT__entry( + __string(device, dev_name(dev)) + __dynamic_array(u64, phys_addrs, nents) + __field(int, err) + __field(enum dma_data_direction, dir) + __field(unsigned long, attrs) + ), + + TP_fast_assign( + struct scatterlist *sg; + int i; + + __assign_str(device); + for_each_sg(sgl, sg, nents, i) + ((u64 *)__get_dynamic_array(phys_addrs))[i] = sg_phys(sg); + __entry->err = err; + __entry->dir = dir; + __entry->attrs = attrs; + ), + + TP_printk("%s dir=%s dma_addrs=%s err=%d attrs=%s", + __get_str(device), + decode_dma_data_direction(__entry->dir), + __print_array(__get_dynamic_array(phys_addrs), + __get_dynamic_array_len(phys_addrs) / + sizeof(u64), sizeof(u64)), + __entry->err, + decode_dma_attrs(__entry->attrs)) +); + TRACE_EVENT(dma_unmap_sg, TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, unsigned long attrs), @@ -279,15 +414,14 @@ DECLARE_EVENT_CLASS(dma_sync_single, __entry->size) ); -DEFINE_EVENT(dma_sync_single, dma_sync_single_for_cpu, - TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir), - TP_ARGS(dev, dma_addr, size, dir)); +#define DEFINE_SYNC_SINGLE_EVENT(name) \ +DEFINE_EVENT(dma_sync_single, name, \ + TP_PROTO(struct device *dev, dma_addr_t dma_addr, size_t size, \ + enum dma_data_direction dir), \ + TP_ARGS(dev, dma_addr, size, dir)) -DEFINE_EVENT(dma_sync_single, dma_sync_single_for_device, - TP_PROTO(struct 
device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir), - TP_ARGS(dev, dma_addr, size, dir)); +DEFINE_SYNC_SINGLE_EVENT(dma_sync_single_for_cpu); +DEFINE_SYNC_SINGLE_EVENT(dma_sync_single_for_device); DECLARE_EVENT_CLASS(dma_sync_sg, TP_PROTO(struct device *dev, struct scatterlist *sgl, int nents, @@ -326,15 +460,14 @@ DECLARE_EVENT_CLASS(dma_sync_sg, sizeof(unsigned int), sizeof(unsigned int))) ); -DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_cpu, - TP_PROTO(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir), - TP_ARGS(dev, sg, nents, dir)); +#define DEFINE_SYNC_SG_EVENT(name) \ +DEFINE_EVENT(dma_sync_sg, name, \ + TP_PROTO(struct device *dev, struct scatterlist *sg, int nents, \ + enum dma_data_direction dir), \ + TP_ARGS(dev, sg, nents, dir)) -DEFINE_EVENT(dma_sync_sg, dma_sync_sg_for_device, - TP_PROTO(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir), - TP_ARGS(dev, sg, nents, dir)); +DEFINE_SYNC_SG_EVENT(dma_sync_sg_for_cpu); +DEFINE_SYNC_SG_EVENT(dma_sync_sg_for_device); #endif /* _TRACE_DMA_H */ diff --git a/include/trace/events/hugetlbfs.h b/include/trace/events/hugetlbfs.h new file mode 100644 index 000000000000..8331c904a9ba --- /dev/null +++ b/include/trace/events/hugetlbfs.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hugetlbfs + +#if !defined(_TRACE_HUGETLBFS_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HUGETLBFS_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(hugetlbfs_alloc_inode, + + TP_PROTO(struct inode *inode, struct inode *dir, int mode), + + TP_ARGS(inode, dir, mode), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(ino_t, dir) + __field(__u16, mode) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->dir = dir->i_ino; + __entry->mode = mode; + ), + + TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, + (unsigned long) __entry->dir, __entry->mode) +); + +DECLARE_EVENT_CLASS(hugetlbfs__inode, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(__u16, mode) + __field(loff_t, size) + __field(unsigned int, nlink) + __field(unsigned int, seals) + __field(blkcnt_t, blocks) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = inode->i_mode; + __entry->size = inode->i_size; + __entry->nlink = inode->i_nlink; + __entry->seals = HUGETLBFS_I(inode)->seals; + __entry->blocks = inode->i_blocks; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o size %lld nlink %u seals %u blocks %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long) __entry->ino, + __entry->mode, __entry->size, __entry->nlink, __entry->seals, + (unsigned long long)__entry->blocks) +); + +DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_evict_inode, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +DEFINE_EVENT(hugetlbfs__inode, hugetlbfs_free_inode, + + TP_PROTO(struct inode *inode), + + TP_ARGS(inode) +); + +TRACE_EVENT(hugetlbfs_setattr, + + TP_PROTO(struct inode *inode, struct dentry *dentry, + struct iattr *attr), + + TP_ARGS(inode, dentry, attr), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(unsigned int, d_len) + __string(d_name, dentry->d_name.name) + __field(unsigned int, ia_valid) + __field(unsigned int, ia_mode) + 
__field(loff_t, old_size) + __field(loff_t, ia_size) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->d_len = dentry->d_name.len; + __assign_str(d_name); + __entry->ia_valid = attr->ia_valid; + __entry->ia_mode = attr->ia_mode; + __entry->old_size = inode->i_size; + __entry->ia_size = attr->ia_size; + ), + + TP_printk("dev %d,%d ino %lu name %.*s valid %#x mode 0%o old_size %lld size %lld", + MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long)__entry->ino, + __entry->d_len, __get_str(d_name), __entry->ia_valid, __entry->ia_mode, + __entry->old_size, __entry->ia_size) +); + +TRACE_EVENT(hugetlbfs_fallocate, + + TP_PROTO(struct inode *inode, int mode, + loff_t offset, loff_t len, int ret), + + TP_ARGS(inode, mode, offset, len, ret), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(int, mode) + __field(loff_t, offset) + __field(loff_t, len) + __field(loff_t, size) + __field(int, ret) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->mode = mode; + __entry->offset = offset; + __entry->len = len; + __entry->size = inode->i_size; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu mode 0%o offset %lld len %lld size %lld ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long)__entry->ino, __entry->mode, + (unsigned long long)__entry->offset, + (unsigned long long)__entry->len, + (unsigned long long)__entry->size, + __entry->ret) +); + +#endif /* _TRACE_HUGETLBFS_H */ + + /* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h index 412c9c210a32..fb81c533b310 100644 --- a/include/trace/events/io_uring.h +++ b/include/trace/events/io_uring.h @@ -315,20 +315,14 @@ TRACE_EVENT(io_uring_fail_link, * io_uring_complete - called when completing an SQE * * @ctx: pointer to a ring context structure - * @req: pointer to a submitted request - * @user_data: user data associated with the request - * @res: result of the request - * @cflags: completion flags - * @extra1: extra 64-bit data for CQE32 - * @extra2: extra 64-bit data for CQE32 - * + * @req: (optional) pointer to a submitted request + * @cqe: pointer to the filled in CQE being posted */ TRACE_EVENT(io_uring_complete, - TP_PROTO(void *ctx, void *req, u64 user_data, int res, unsigned cflags, - u64 extra1, u64 extra2), +TP_PROTO(struct io_ring_ctx *ctx, void *req, struct io_uring_cqe *cqe), - TP_ARGS(ctx, req, user_data, res, cflags, extra1, extra2), + TP_ARGS(ctx, req, cqe), TP_STRUCT__entry ( __field( void *, ctx ) @@ -343,11 +337,11 @@ TRACE_EVENT(io_uring_complete, TP_fast_assign( __entry->ctx = ctx; __entry->req = req; - __entry->user_data = user_data; - __entry->res = res; - __entry->cflags = cflags; - __entry->extra1 = extra1; - __entry->extra2 = extra2; + __entry->user_data = cqe->user_data; + __entry->res = cqe->res; + __entry->cflags = cqe->flags; + __entry->extra1 = io_ctx_cqe32(ctx) ? cqe->big_cqe[0] : 0; + __entry->extra2 = io_ctx_cqe32(ctx) ? 
cqe->big_cqe[1] : 0; ), TP_printk("ring %p, req %p, user_data 0x%llx, result %d, cflags 0x%x " diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h index 74e40d5d4af4..fc7d0f8ff078 100644 --- a/include/trace/events/kvm.h +++ b/include/trace/events/kvm.h @@ -236,6 +236,41 @@ TRACE_EVENT(kvm_mmio, __entry->len, __entry->gpa, __entry->val) ); +#define KVM_TRACE_IOCSR_READ_UNSATISFIED 0 +#define KVM_TRACE_IOCSR_READ 1 +#define KVM_TRACE_IOCSR_WRITE 2 + +#define kvm_trace_symbol_iocsr \ + { KVM_TRACE_IOCSR_READ_UNSATISFIED, "unsatisfied-read" }, \ + { KVM_TRACE_IOCSR_READ, "read" }, \ + { KVM_TRACE_IOCSR_WRITE, "write" } + +TRACE_EVENT(kvm_iocsr, + TP_PROTO(int type, int len, u64 gpa, void *val), + TP_ARGS(type, len, gpa, val), + + TP_STRUCT__entry( + __field( u32, type ) + __field( u32, len ) + __field( u64, gpa ) + __field( u64, val ) + ), + + TP_fast_assign( + __entry->type = type; + __entry->len = len; + __entry->gpa = gpa; + __entry->val = 0; + if (val) + memcpy(&__entry->val, val, + min_t(u32, sizeof(__entry->val), len)); + ), + + TP_printk("iocsr %s len %u gpa 0x%llx val 0x%llx", + __print_symbolic(__entry->type, kvm_trace_symbol_iocsr), + __entry->len, __entry->gpa, __entry->val) +); + #define kvm_fpu_load_symbol \ {0, "unload"}, \ {1, "load"} diff --git a/include/trace/events/mce.h b/include/trace/events/mce.h index f0f7b3cb2041..c1c50df9ecfd 100644 --- a/include/trace/events/mce.h +++ b/include/trace/events/mce.h @@ -19,9 +19,9 @@ TRACE_EVENT(mce_record, - TP_PROTO(struct mce *m), + TP_PROTO(struct mce_hw_err *err), - TP_ARGS(m), + TP_ARGS(err), TP_STRUCT__entry( __field( u64, mcgcap ) @@ -43,31 +43,33 @@ TRACE_EVENT(mce_record, __field( u8, bank ) __field( u8, cpuvendor ) __field( u32, microcode ) + __dynamic_array(u8, v_data, sizeof(err->vendor)) ), TP_fast_assign( - __entry->mcgcap = m->mcgcap; - __entry->mcgstatus = m->mcgstatus; - __entry->status = m->status; - __entry->addr = m->addr; - __entry->misc = m->misc; - __entry->synd = m->synd; - __entry->ipid = m->ipid; - __entry->ip = m->ip; - __entry->tsc = m->tsc; - __entry->ppin = m->ppin; - __entry->walltime = m->time; - __entry->cpu = m->extcpu; - __entry->cpuid = m->cpuid; - __entry->apicid = m->apicid; - __entry->socketid = m->socketid; - __entry->cs = m->cs; - __entry->bank = m->bank; - __entry->cpuvendor = m->cpuvendor; - __entry->microcode = m->microcode; + __entry->mcgcap = err->m.mcgcap; + __entry->mcgstatus = err->m.mcgstatus; + __entry->status = err->m.status; + __entry->addr = err->m.addr; + __entry->misc = err->m.misc; + __entry->synd = err->m.synd; + __entry->ipid = err->m.ipid; + __entry->ip = err->m.ip; + __entry->tsc = err->m.tsc; + __entry->ppin = err->m.ppin; + __entry->walltime = err->m.time; + __entry->cpu = err->m.extcpu; + __entry->cpuid = err->m.cpuid; + __entry->apicid = err->m.apicid; + __entry->socketid = err->m.socketid; + __entry->cs = err->m.cs; + __entry->bank = err->m.bank; + __entry->cpuvendor = err->m.cpuvendor; + __entry->microcode = err->m.microcode; + memcpy(__get_dynamic_array(v_data), &err->vendor, sizeof(err->vendor)); ), - TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016Lx, IPID: %016Lx, ADDR: %016Lx, MISC: %016Lx, SYND: %016Lx, RIP: %02x:<%016Lx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x", + TP_printk("CPU: %d, MCGc/s: %llx/%llx, MC%d: %016llx, IPID: %016llx, ADDR: %016llx, MISC: %016llx, SYND: %016llx, RIP: %02x:<%016llx>, TSC: %llx, PPIN: %llx, vendor: %u, CPUID: %x, time: %llu, socket: %u, APIC: %x, microcode: %x, 
vendor data: %s", __entry->cpu, __entry->mcgcap, __entry->mcgstatus, __entry->bank, __entry->status, @@ -83,7 +85,8 @@ TRACE_EVENT(mce_record, __entry->walltime, __entry->socketid, __entry->apicid, - __entry->microcode) + __entry->microcode, + __print_dynamic_array(v_data, sizeof(u8))) ); #endif /* _TRACE_MCE_H */ diff --git a/include/trace/events/memcg.h b/include/trace/events/memcg.h new file mode 100644 index 000000000000..dfe2f51019b4 --- /dev/null +++ b/include/trace/events/memcg.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM memcg + +#if !defined(_TRACE_MEMCG_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_MEMCG_H + +#include <linux/memcontrol.h> +#include <linux/tracepoint.h> + + +DECLARE_EVENT_CLASS(memcg_rstat_stats, + + TP_PROTO(struct mem_cgroup *memcg, int item, int val), + + TP_ARGS(memcg, item, val), + + TP_STRUCT__entry( + __field(u64, id) + __field(int, item) + __field(int, val) + ), + + TP_fast_assign( + __entry->id = cgroup_id(memcg->css.cgroup); + __entry->item = item; + __entry->val = val; + ), + + TP_printk("memcg_id=%llu item=%d val=%d", + __entry->id, __entry->item, __entry->val) +); + +DEFINE_EVENT(memcg_rstat_stats, mod_memcg_state, + + TP_PROTO(struct mem_cgroup *memcg, int item, int val), + + TP_ARGS(memcg, item, val) +); + +DEFINE_EVENT(memcg_rstat_stats, mod_memcg_lruvec_state, + + TP_PROTO(struct mem_cgroup *memcg, int item, int val), + + TP_ARGS(memcg, item, val) +); + +DECLARE_EVENT_CLASS(memcg_rstat_events, + + TP_PROTO(struct mem_cgroup *memcg, int item, unsigned long val), + + TP_ARGS(memcg, item, val), + + TP_STRUCT__entry( + __field(u64, id) + __field(int, item) + __field(unsigned long, val) + ), + + TP_fast_assign( + __entry->id = cgroup_id(memcg->css.cgroup); + __entry->item = item; + __entry->val = val; + ), + + TP_printk("memcg_id=%llu item=%d val=%lu", + __entry->id, __entry->item, __entry->val) +); + +DEFINE_EVENT(memcg_rstat_events, count_memcg_events, + + TP_PROTO(struct mem_cgroup *memcg, int item, unsigned long val), + + TP_ARGS(memcg, item, val) +); + +TRACE_EVENT(memcg_flush_stats, + + TP_PROTO(struct mem_cgroup *memcg, s64 stats_updates, + bool force, bool needs_flush), + + TP_ARGS(memcg, stats_updates, force, needs_flush), + + TP_STRUCT__entry( + __field(u64, id) + __field(s64, stats_updates) + __field(bool, force) + __field(bool, needs_flush) + ), + + TP_fast_assign( + __entry->id = cgroup_id(memcg->css.cgroup); + __entry->stats_updates = stats_updates; + __entry->force = force; + __entry->needs_flush = needs_flush; + ), + + TP_printk("memcg_id=%llu stats_updates=%lld force=%d needs_flush=%d", + __entry->id, __entry->stats_updates, + __entry->force, __entry->needs_flush) +); + +#endif /* _TRACE_MEMCG_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h index f2827f98a44f..bc2e3ad787b3 100644 --- a/include/trace/events/mmap_lock.h +++ b/include/trace/events/mmap_lock.h @@ -10,9 +10,6 @@ struct mm_struct; -extern int trace_mmap_lock_reg(void); -extern void trace_mmap_lock_unreg(void); - DECLARE_EVENT_CLASS(mmap_lock, TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write), @@ -40,16 +37,15 @@ DECLARE_EVENT_CLASS(mmap_lock, ); #define DEFINE_MMAP_LOCK_EVENT(name) \ - DEFINE_EVENT_FN(mmap_lock, name, \ + DEFINE_EVENT(mmap_lock, name, \ TP_PROTO(struct mm_struct *mm, const char *memcg_path, \ bool write), \ - TP_ARGS(mm, memcg_path, write), \ - 
trace_mmap_lock_reg, trace_mmap_lock_unreg) + TP_ARGS(mm, memcg_path, write)) DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking); DEFINE_MMAP_LOCK_EVENT(mmap_lock_released); -TRACE_EVENT_FN(mmap_lock_acquire_returned, +TRACE_EVENT(mmap_lock_acquire_returned, TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write, bool success), @@ -76,9 +72,7 @@ TRACE_EVENT_FN(mmap_lock_acquire_returned, __get_str(memcg_path), __entry->write ? "true" : "false", __entry->success ? "true" : "false" - ), - - trace_mmap_lock_reg, trace_mmap_lock_unreg + ) ); #endif /* _TRACE_MMAP_LOCK_H */ diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h index 69975c9c6823..bf511bca896e 100644 --- a/include/trace/events/netfs.h +++ b/include/trace/events/netfs.h @@ -450,7 +450,7 @@ TRACE_EVENT(netfs_folio, struct address_space *__m = READ_ONCE(folio->mapping); __entry->ino = __m ? __m->host->i_ino : 0; __entry->why = why; - __entry->index = folio_index(folio); + __entry->index = folio->index; __entry->nr = folio_nr_pages(folio); ), diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h index 3f249e150c0c..f99562d2b496 100644 --- a/include/trace/events/preemptirq.h +++ b/include/trace/events/preemptirq.h @@ -43,8 +43,6 @@ DEFINE_EVENT(preemptirq_template, irq_enable, #else #define trace_irq_enable(...) #define trace_irq_disable(...) -#define trace_irq_enable_rcuidle(...) -#define trace_irq_disable_rcuidle(...) #endif #ifdef CONFIG_TRACE_PREEMPT_TOGGLE @@ -58,8 +56,6 @@ DEFINE_EVENT(preemptirq_template, preempt_enable, #else #define trace_preempt_enable(...) #define trace_preempt_disable(...) -#define trace_preempt_enable_rcuidle(...) -#define trace_preempt_disable_rcuidle(...) #endif #endif /* _TRACE_PREEMPTIRQ_H */ @@ -69,10 +65,6 @@ DEFINE_EVENT(preemptirq_template, preempt_enable, #else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */ #define trace_irq_enable(...) #define trace_irq_disable(...) -#define trace_irq_enable_rcuidle(...) -#define trace_irq_disable_rcuidle(...) #define trace_preempt_enable(...) #define trace_preempt_disable(...) -#define trace_preempt_enable_rcuidle(...) -#define trace_preempt_disable_rcuidle(...) #endif diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h index 8022701c446d..8ba898fd335c 100644 --- a/include/trace/events/pwm.h +++ b/include/trace/events/pwm.h @@ -8,15 +8,135 @@ #include <linux/pwm.h> #include <linux/tracepoint.h> +#define TP_PROTO_pwm(args...) \ + TP_PROTO(struct pwm_device *pwm, args) + +#define TP_ARGS_pwm(args...) \ + TP_ARGS(pwm, args) + +#define TP_STRUCT__entry_pwm(args...) \ + TP_STRUCT__entry( \ + __field(unsigned int, chipid) \ + __field(unsigned int, hwpwm) \ + args) + +#define TP_fast_assign_pwm(args...) \ + TP_fast_assign( \ + __entry->chipid = pwm->chip->id; \ + __entry->hwpwm = pwm->hwpwm; \ + args) + +#define TP_printk_pwm(fmt, args...) 
\ + TP_printk("pwmchip%u.%u: " fmt, __entry->chipid, __entry->hwpwm, args) + +#define __field_pwmwf(wf) \ + __field(u64, wf ## _period_length_ns) \ + __field(u64, wf ## _duty_length_ns) \ + __field(u64, wf ## _duty_offset_ns) \ + +#define fast_assign_pwmwf(wf) \ + __entry->wf ## _period_length_ns = wf->period_length_ns; \ + __entry->wf ## _duty_length_ns = wf->duty_length_ns; \ + __entry->wf ## _duty_offset_ns = wf->duty_offset_ns + +#define printk_pwmwf_format(wf) \ + "%lld/%lld [+%lld]" + +#define printk_pwmwf_formatargs(wf) \ + __entry->wf ## _duty_length_ns, __entry->wf ## _period_length_ns, __entry->wf ## _duty_offset_ns + +TRACE_EVENT(pwm_round_waveform_tohw, + + TP_PROTO_pwm(const struct pwm_waveform *wf, void *wfhw, int err), + + TP_ARGS_pwm(wf, wfhw, err), + + TP_STRUCT__entry_pwm( + __field_pwmwf(wf) + __field(void *, wfhw) + __field(int, err) + ), + + TP_fast_assign_pwm( + fast_assign_pwmwf(wf); + __entry->wfhw = wfhw; + __entry->err = err; + ), + + TP_printk_pwm(printk_pwmwf_format(wf) " > %p err=%d", + printk_pwmwf_formatargs(wf), __entry->wfhw, __entry->err) +); + +TRACE_EVENT(pwm_round_waveform_fromhw, + + TP_PROTO_pwm(const void *wfhw, struct pwm_waveform *wf, int err), + + TP_ARGS_pwm(wfhw, wf, err), + + TP_STRUCT__entry_pwm( + __field(const void *, wfhw) + __field_pwmwf(wf) + __field(int, err) + ), + + TP_fast_assign_pwm( + __entry->wfhw = wfhw; + fast_assign_pwmwf(wf); + __entry->err = err; + ), + + TP_printk_pwm("%p > " printk_pwmwf_format(wf) " err=%d", + __entry->wfhw, printk_pwmwf_formatargs(wf), __entry->err) +); + +TRACE_EVENT(pwm_read_waveform, + + TP_PROTO_pwm(void *wfhw, int err), + + TP_ARGS_pwm(wfhw, err), + + TP_STRUCT__entry_pwm( + __field(void *, wfhw) + __field(int, err) + ), + + TP_fast_assign_pwm( + __entry->wfhw = wfhw; + __entry->err = err; + ), + + TP_printk_pwm("%p err=%d", + __entry->wfhw, __entry->err) +); + +TRACE_EVENT(pwm_write_waveform, + + TP_PROTO_pwm(const void *wfhw, int err), + + TP_ARGS_pwm(wfhw, err), + + TP_STRUCT__entry_pwm( + __field(const void *, wfhw) + __field(int, err) + ), + + TP_fast_assign_pwm( + __entry->wfhw = wfhw; + __entry->err = err; + ), + + TP_printk_pwm("%p err=%d", + __entry->wfhw, __entry->err) +); + + DECLARE_EVENT_CLASS(pwm, TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state, int err), TP_ARGS(pwm, state, err), - TP_STRUCT__entry( - __field(unsigned int, chipid) - __field(unsigned int, hwpwm) + TP_STRUCT__entry_pwm( __field(u64, period) __field(u64, duty_cycle) __field(enum pwm_polarity, polarity) @@ -24,9 +144,7 @@ DECLARE_EVENT_CLASS(pwm, __field(int, err) ), - TP_fast_assign( - __entry->chipid = pwm->chip->id; - __entry->hwpwm = pwm->hwpwm; + TP_fast_assign_pwm( __entry->period = state->period; __entry->duty_cycle = state->duty_cycle; __entry->polarity = state->polarity; @@ -34,8 +152,8 @@ DECLARE_EVENT_CLASS(pwm, __entry->err = err; ), - TP_printk("pwmchip%u.%u: period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d", - __entry->chipid, __entry->hwpwm, __entry->period, __entry->duty_cycle, + TP_printk_pwm("period=%llu duty_cycle=%llu polarity=%d enabled=%d err=%d", + __entry->period, __entry->duty_cycle, __entry->polarity, __entry->enabled, __entry->err) ); diff --git a/include/trace/events/rust_sample.h b/include/trace/events/rust_sample.h new file mode 100644 index 000000000000..dbc80ca2e465 --- /dev/null +++ b/include/trace/events/rust_sample.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Tracepoints for `samples/rust/rust_print.rs`. 
+ * + * Copyright (C) 2024 Google, Inc. + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM rust_sample + +#if !defined(_RUST_SAMPLE_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _RUST_SAMPLE_TRACE_H + +#include <linux/tracepoint.h> + +TRACE_EVENT(rust_sample_loaded, + TP_PROTO(int magic_number), + TP_ARGS(magic_number), + TP_STRUCT__entry( + __field(int, magic_number) + ), + TP_fast_assign( + __entry->magic_number = magic_number; + ), + TP_printk("magic=%d", __entry->magic_number) +); + +#endif /* _RUST_SAMPLE_TRACE_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h index cc22596c7250..d03e0bd8c028 100644 --- a/include/trace/events/rxrpc.h +++ b/include/trace/events/rxrpc.h @@ -773,6 +773,31 @@ TRACE_EVENT(rxrpc_rx_done, TP_printk("r=%d a=%d", __entry->result, __entry->abort_code) ); +TRACE_EVENT(rxrpc_abort_call, + TP_PROTO(const struct rxrpc_call *call, int abort_code), + + TP_ARGS(call, abort_code), + + TP_STRUCT__entry( + __field(unsigned int, call_nr) + __field(enum rxrpc_abort_reason, why) + __field(int, abort_code) + __field(int, error) + ), + + TP_fast_assign( + __entry->call_nr = call->debug_id; + __entry->why = call->send_abort_why; + __entry->abort_code = abort_code; + __entry->error = call->send_abort_err; + ), + + TP_printk("c=%08x a=%d e=%d %s", + __entry->call_nr, + __entry->abort_code, __entry->error, + __print_symbolic(__entry->why, rxrpc_abort_reasons)) + ); + TRACE_EVENT(rxrpc_abort, TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why, u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error), diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index 5e8495216689..b13dc275ef4a 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -719,7 +719,7 @@ TRACE_EVENT(rpc_xdr_overflow, ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER - " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u\n", + " %sv%d %s requested=%zu p=%p end=%p xdr=[%p,%zu]/%u/[%p,%zu]/%u", __entry->task_id, __entry->client_id, __get_str(progname), __entry->version, __get_str(procedure), __entry->requested, __entry->p, __entry->end, @@ -777,7 +777,7 @@ TRACE_EVENT(rpc_xdr_alignment, ), TP_printk(SUNRPC_TRACE_TASK_SPECIFIER - " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u\n", + " %sv%d %s offset=%zu copied=%u xdr=[%p,%zu]/%u/[%p,%zu]/%u", __entry->task_id, __entry->client_id, __get_str(progname), __entry->version, __get_str(procedure), __entry->offset, __entry->copied, diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h index b6e0cbc2c71f..f31ff446b468 100644 --- a/include/trace/events/syscalls.h +++ b/include/trace/events/syscalls.h @@ -15,7 +15,7 @@ #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS -TRACE_EVENT_FN(sys_enter, +TRACE_EVENT_SYSCALL(sys_enter, TP_PROTO(struct pt_regs *regs, long id), @@ -41,7 +41,7 @@ TRACE_EVENT_FN(sys_enter, TRACE_EVENT_FLAGS(sys_enter, TRACE_EVENT_FL_CAP_ANY) -TRACE_EVENT_FN(sys_exit, +TRACE_EVENT_SYSCALL(sys_exit, TP_PROTO(struct pt_regs *regs, long ret), diff --git a/include/trace/events/timestamp.h b/include/trace/events/timestamp.h new file mode 100644 index 000000000000..c9e5ec930054 --- /dev/null +++ b/include/trace/events/timestamp.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM timestamp + +#if !defined(_TRACE_TIMESTAMP_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_TIMESTAMP_H + +#include 
<linux/tracepoint.h> +#include <linux/fs.h> + +#define CTIME_QUERIED_FLAGS \ + { I_CTIME_QUERIED, "Q" } + +DECLARE_EVENT_CLASS(ctime, + TP_PROTO(struct inode *inode, + struct timespec64 *ctime), + + TP_ARGS(inode, ctime), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(time64_t, ctime_s) + __field(u32, ctime_ns) + __field(u32, gen) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->gen = inode->i_generation; + __entry->ctime_s = ctime->tv_sec; + __entry->ctime_ns = ctime->tv_nsec; + ), + + TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen, + __entry->ctime_s, __entry->ctime_ns + ) +); + +DEFINE_EVENT(ctime, inode_set_ctime_to_ts, + TP_PROTO(struct inode *inode, + struct timespec64 *ctime), + TP_ARGS(inode, ctime)); + +DEFINE_EVENT(ctime, ctime_xchg_skip, + TP_PROTO(struct inode *inode, + struct timespec64 *ctime), + TP_ARGS(inode, ctime)); + +TRACE_EVENT(ctime_ns_xchg, + TP_PROTO(struct inode *inode, + u32 old, + u32 new, + u32 cur), + + TP_ARGS(inode, old, new, cur), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(u32, gen) + __field(u32, old) + __field(u32, new) + __field(u32, cur) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->gen = inode->i_generation; + __entry->old = old; + __entry->new = new; + __entry->cur = cur; + ), + + TP_printk("ino=%d:%d:%ld:%u old=%u:%s new=%u cur=%u:%s", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen, + __entry->old & ~I_CTIME_QUERIED, + __print_flags(__entry->old & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS), + __entry->new, + __entry->cur & ~I_CTIME_QUERIED, + __print_flags(__entry->cur & I_CTIME_QUERIED, "|", CTIME_QUERIED_FLAGS) + ) +); + +TRACE_EVENT(fill_mg_cmtime, + TP_PROTO(struct inode *inode, + struct timespec64 *ctime, + struct timespec64 *mtime), + + TP_ARGS(inode, ctime, mtime), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(ino_t, ino) + __field(time64_t, ctime_s) + __field(time64_t, mtime_s) + __field(u32, ctime_ns) + __field(u32, mtime_ns) + __field(u32, gen) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->gen = inode->i_generation; + __entry->ctime_s = ctime->tv_sec; + __entry->mtime_s = mtime->tv_sec; + __entry->ctime_ns = ctime->tv_nsec; + __entry->mtime_ns = mtime->tv_nsec; + ), + + TP_printk("ino=%d:%d:%ld:%u ctime=%lld.%u mtime=%lld.%u", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino, __entry->gen, + __entry->ctime_s, __entry->ctime_ns, + __entry->mtime_s, __entry->mtime_ns + ) +); +#endif /* _TRACE_TIMESTAMP_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h index 1a488c30afa5..490958fa10de 100644 --- a/include/trace/events/vmscan.h +++ b/include/trace/events/vmscan.h @@ -346,6 +346,51 @@ TRACE_EVENT(mm_vmscan_write_folio, show_reclaim_flags(__entry->reclaim_flags)) ); +TRACE_EVENT(mm_vmscan_reclaim_pages, + + TP_PROTO(int nid, + unsigned long nr_scanned, unsigned long nr_reclaimed, + struct reclaim_stat *stat), + + TP_ARGS(nid, nr_scanned, nr_reclaimed, stat), + + TP_STRUCT__entry( + __field(int, nid) + __field(unsigned long, nr_scanned) + __field(unsigned long, nr_reclaimed) + __field(unsigned long, nr_dirty) + __field(unsigned long, nr_writeback) + __field(unsigned long, nr_congested) + __field(unsigned 
long, nr_immediate) + __field(unsigned int, nr_activate0) + __field(unsigned int, nr_activate1) + __field(unsigned long, nr_ref_keep) + __field(unsigned long, nr_unmap_fail) + ), + + TP_fast_assign( + __entry->nid = nid; + __entry->nr_scanned = nr_scanned; + __entry->nr_reclaimed = nr_reclaimed; + __entry->nr_dirty = stat->nr_dirty; + __entry->nr_writeback = stat->nr_writeback; + __entry->nr_congested = stat->nr_congested; + __entry->nr_immediate = stat->nr_immediate; + __entry->nr_activate0 = stat->nr_activate[0]; + __entry->nr_activate1 = stat->nr_activate[1]; + __entry->nr_ref_keep = stat->nr_ref_keep; + __entry->nr_unmap_fail = stat->nr_unmap_fail; + ), + + TP_printk("nid=%d nr_scanned=%ld nr_reclaimed=%ld nr_dirty=%ld nr_writeback=%ld nr_congested=%ld nr_immediate=%ld nr_activate_anon=%d nr_activate_file=%d nr_ref_keep=%ld nr_unmap_fail=%ld", + __entry->nid, + __entry->nr_scanned, __entry->nr_reclaimed, + __entry->nr_dirty, __entry->nr_writeback, + __entry->nr_congested, __entry->nr_immediate, + __entry->nr_activate0, __entry->nr_activate1, + __entry->nr_ref_keep, __entry->nr_unmap_fail) +); + TRACE_EVENT(mm_vmscan_lru_shrink_inactive, TP_PROTO(int nid, diff --git a/include/trace/perf.h b/include/trace/perf.h index 2c11181c82e0..a1754b73a8f5 100644 --- a/include/trace/perf.h +++ b/include/trace/perf.h @@ -12,10 +12,10 @@ #undef __perf_task #define __perf_task(t) (__task = (t)) -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +#undef __DECLARE_EVENT_CLASS +#define __DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -perf_trace_##call(void *__data, proto) \ +do_perf_trace_##call(void *__data, proto) \ { \ struct trace_event_call *event_call = __data; \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ @@ -56,6 +56,41 @@ perf_trace_##call(void *__data, proto) \ } /* + * Define unused __count and __task variables to use @args to pass + * arguments to do_perf_trace_##call. This is needed because the + * macros __perf_count and __perf_task introduce the side-effect to + * store copies into those local variables. + */ +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +perf_trace_##call(void *__data, proto) \ +{ \ + u64 __count __attribute__((unused)); \ + struct task_struct *__task __attribute__((unused)); \ + \ + do_perf_trace_##call(__data, args); \ +} + +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +perf_trace_##call(void *__data, proto) \ +{ \ + u64 __count __attribute__((unused)); \ + struct task_struct *__task __attribute__((unused)); \ + \ + might_fault(); \ + preempt_disable_notrace(); \ + do_perf_trace_##call(__data, args); \ + preempt_enable_notrace(); \ +} + +/* * This part is compiled out, it is only here as a build time check * to make sure that if the tracepoint handling changes, the * perf probe will fail to compile unless it too is updated. 
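To make the perf-side rework above concrete, this is roughly what DECLARE_EVENT_SYSCALL_CLASS() now generates for a hypothetical event class "foo" with a single "int nr" argument (a sketch only; the class name and argument are illustrative, and the real body of the inner function is produced by __DECLARE_EVENT_CLASS()):

static notrace void do_perf_trace_foo(void *__data, int nr)
{
	/* generated by __DECLARE_EVENT_CLASS(): sets up the perf buffer,
	 * runs TP_fast_assign(), and submits the sample */
}

static notrace void perf_trace_foo(void *__data, int nr)
{
	/* present so that __perf_count()/__perf_task() used inside
	 * TP_ARGS() have local variables to assign to */
	u64 __count __attribute__((unused));
	struct task_struct *__task __attribute__((unused));

	might_fault();			/* a syscall tracepoint may touch user memory */
	preempt_disable_notrace();	/* but the handler itself must not migrate */
	do_perf_trace_foo(__data, nr);
	preempt_enable_notrace();
}

The plain DECLARE_EVENT_CLASS() variant is identical minus the might_fault() and the preempt_disable_notrace()/preempt_enable_notrace() pair.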
@@ -73,4 +108,7 @@ static inline void perf_test_probe_##call(void) \ DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +#undef __DECLARE_EVENT_CLASS + #endif /* CONFIG_PERF_EVENTS */ diff --git a/include/trace/stages/stage3_trace_output.h b/include/trace/stages/stage3_trace_output.h index c1fb1355d309..1e7b0bef95f5 100644 --- a/include/trace/stages/stage3_trace_output.h +++ b/include/trace/stages/stage3_trace_output.h @@ -119,6 +119,14 @@ trace_print_array_seq(p, array, count, el_size); \ }) +#undef __print_dynamic_array +#define __print_dynamic_array(array, el_size) \ + ({ \ + __print_array(__get_dynamic_array(array), \ + __get_dynamic_array_len(array) / (el_size), \ + (el_size)); \ + }) + #undef __print_hex_dump #define __print_hex_dump(prefix_str, prefix_type, \ rowsize, groupsize, buf, len, ascii) \ diff --git a/include/trace/stages/stage7_class_define.h b/include/trace/stages/stage7_class_define.h index bcb960d16fc0..fcd564a590f4 100644 --- a/include/trace/stages/stage7_class_define.h +++ b/include/trace/stages/stage7_class_define.h @@ -22,6 +22,7 @@ #undef __get_rel_cpumask #undef __get_rel_sockaddr #undef __print_array +#undef __print_dynamic_array #undef __print_hex_dump #undef __get_buf diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h index c2f9cabf154d..4f22136fd465 100644 --- a/include/trace/trace_events.h +++ b/include/trace/trace_events.h @@ -45,6 +45,16 @@ PARAMS(print)); \ DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); +#undef TRACE_EVENT_SYSCALL +#define TRACE_EVENT_SYSCALL(name, proto, args, tstruct, assign, print, reg, unreg) \ + DECLARE_EVENT_SYSCALL_CLASS(name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(tstruct), \ + PARAMS(assign), \ + PARAMS(print)); \ + DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); + #include "stages/stage1_struct_define.h" #undef DECLARE_EVENT_CLASS @@ -57,6 +67,9 @@ \ static struct trace_event_class event_class_##name; +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS + #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) \ static struct trace_event_call __used \ @@ -117,6 +130,9 @@ tstruct; \ }; +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS + #undef DEFINE_EVENT #define DEFINE_EVENT(template, name, proto, args) @@ -208,6 +224,9 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \ .trace = trace_raw_output_##call, \ }; +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS + #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ static notrace enum print_line_t \ @@ -244,6 +263,9 @@ static struct trace_event_fields trace_event_fields_##call[] = { \ tstruct \ {} }; +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS + #undef DEFINE_EVENT_PRINT #define DEFINE_EVENT_PRINT(template, name, proto, args, print) @@ -265,6 +287,9 @@ static inline notrace int trace_event_get_offsets_##call( \ return __data_size; \ } +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -374,11 +399,11 @@ static inline notrace int trace_event_get_offsets_##call( \ #include "stages/stage6_event_callback.h" -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ - \ + +#undef __DECLARE_EVENT_CLASS +#define 
__DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ static notrace void \ -trace_event_raw_event_##call(void *__data, proto) \ +do_trace_event_raw_event_##call(void *__data, proto) \ { \ struct trace_event_file *trace_file = __data; \ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\ @@ -403,6 +428,30 @@ trace_event_raw_event_##call(void *__data, proto) \ \ trace_event_buffer_commit(&fbuffer); \ } + +#undef DECLARE_EVENT_CLASS +#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +trace_event_raw_event_##call(void *__data, proto) \ +{ \ + do_trace_event_raw_event_##call(__data, args); \ +} + +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS(call, proto, args, tstruct, assign, print) \ +__DECLARE_EVENT_CLASS(call, PARAMS(proto), PARAMS(args), PARAMS(tstruct), \ + PARAMS(assign), PARAMS(print)) \ +static notrace void \ +trace_event_raw_event_##call(void *__data, proto) \ +{ \ + might_fault(); \ + preempt_disable_notrace(); \ + do_trace_event_raw_event_##call(__data, args); \ + preempt_enable_notrace(); \ +} + /* * The ftrace_test_probe is compiled out, it is only here as a build time check * to make sure that if the tracepoint handling changes, the ftrace probe will @@ -418,6 +467,8 @@ static inline void ftrace_test_probe_##call(void) \ #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef __DECLARE_EVENT_CLASS + #include "stages/stage7_class_define.h" #undef DECLARE_EVENT_CLASS @@ -434,6 +485,9 @@ static struct trace_event_class __used __refdata event_class_##call = { \ _TRACE_PERF_INIT(call) \ }; +#undef DECLARE_EVENT_SYSCALL_CLASS +#define DECLARE_EVENT_SYSCALL_CLASS DECLARE_EVENT_CLASS + #undef DEFINE_EVENT #define DEFINE_EVENT(template, call, proto, args) \ \ diff --git a/include/uapi/asm-generic/ioctl.h b/include/uapi/asm-generic/ioctl.h index a84f4db8a250..e3290a5824c9 100644 --- a/include/uapi/asm-generic/ioctl.h +++ b/include/uapi/asm-generic/ioctl.h @@ -82,13 +82,13 @@ * NOTE: _IOW means userland is writing and kernel is reading. _IOR * means userland is reading and kernel is writing. */ -#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) -#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size))) -#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) -#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size))) -#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size)) -#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size)) -#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size)) +#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0) +#define _IOR(type,nr,argtype) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(argtype))) +#define _IOW(type,nr,argtype) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(argtype))) +#define _IOWR(type,nr,argtype) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(argtype))) +#define _IOR_BAD(type,nr,argtype) _IOC(_IOC_READ,(type),(nr),sizeof(argtype)) +#define _IOW_BAD(type,nr,argtype) _IOC(_IOC_WRITE,(type),(nr),sizeof(argtype)) +#define _IOWR_BAD(type,nr,argtype) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(argtype)) /* used to decode ioctl numbers.. 
*/ #define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK) diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h index 6ce1f1ceb432..1ea2c4c33b86 100644 --- a/include/uapi/asm-generic/mman-common.h +++ b/include/uapi/asm-generic/mman-common.h @@ -79,6 +79,9 @@ #define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */ +#define MADV_GUARD_INSTALL 102 /* fatal signal on access to range */ +#define MADV_GUARD_REMOVE 103 /* unguard range */ + /* compatibility flags */ #define MAP_FILE 0 diff --git a/include/uapi/asm-generic/mman.h b/include/uapi/asm-generic/mman.h index 57e8195d0b53..5e3d61ddbd8c 100644 --- a/include/uapi/asm-generic/mman.h +++ b/include/uapi/asm-generic/mman.h @@ -19,4 +19,8 @@ #define MCL_FUTURE 2 /* lock all future mappings */ #define MCL_ONFAULT 4 /* lock all pages that are faulted in */ +#define SHADOW_STACK_SET_TOKEN (1ULL << 0) /* Set up a restore token in the shadow stack */ +#define SHADOW_STACK_SET_MARKER (1ULL << 1) /* Set up a top of stack marker in the shadow stack */ + + #endif /* __ASM_GENERIC_MMAN_H */ diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h index b7bc545ec3b2..5a1ca43b5fc6 100644 --- a/include/uapi/asm-generic/siginfo.h +++ b/include/uapi/asm-generic/siginfo.h @@ -46,7 +46,7 @@ union __sifields { __kernel_timer_t _tid; /* timer id */ int _overrun; /* overrun count */ sigval_t _sigval; /* same as below */ - int _sys_private; /* not to be passed to user */ + int _sys_private; /* Not used by the kernel. Historic leftover. Always 0. */ } _timer; /* POSIX.1b signals */ diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h index 3b4e3e815602..deacfd6dd197 100644 --- a/include/uapi/asm-generic/socket.h +++ b/include/uapi/asm-generic/socket.h @@ -141,6 +141,8 @@ #define SCM_DEVMEM_DMABUF SO_DEVMEM_DMABUF #define SO_DEVMEM_DONTNEED 80 +#define SCM_TS_OPT_ID 81 + #if !defined(__KERNEL__) #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__)) diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h index 5bf6148cac2b..88dc393c2bca 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -841,8 +841,17 @@ __SYSCALL(__NR_lsm_list_modules, sys_lsm_list_modules) #define __NR_mseal 462 __SYSCALL(__NR_mseal, sys_mseal) +#define __NR_setxattrat 463 +__SYSCALL(__NR_setxattrat, sys_setxattrat) +#define __NR_getxattrat 464 +__SYSCALL(__NR_getxattrat, sys_getxattrat) +#define __NR_listxattrat 465 +__SYSCALL(__NR_listxattrat, sys_listxattrat) +#define __NR_removexattrat 466 +__SYSCALL(__NR_removexattrat, sys_removexattrat) + #undef __NR_syscalls -#define __NR_syscalls 463 +#define __NR_syscalls 467 /* * 32 bit systems traditionally used different diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h index 16122819edfe..7fba37b94401 100644 --- a/include/uapi/drm/drm.h +++ b/include/uapi/drm/drm.h @@ -1024,6 +1024,13 @@ struct drm_crtc_queue_sequence { __u64 user_data; /* user data passed to event */ }; +#define DRM_CLIENT_NAME_MAX_LEN 64 +struct drm_set_client_name { + __u64 name_len; + __u64 name; +}; + + #if defined(__cplusplus) } #endif @@ -1288,6 +1295,16 @@ extern "C" { */ #define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb) +/** + * DRM_IOCTL_SET_CLIENT_NAME - Attach a name to a drm_file + * + * Having a name allows for easier tracking and debugging. + * The length of the name (without null ending char) must be + * <= DRM_CLIENT_NAME_MAX_LEN. 
+ * The call will fail if the name contains whitespaces or non-printable chars. + */ +#define DRM_IOCTL_SET_CLIENT_NAME DRM_IOWR(0xD1, struct drm_set_client_name) + /* * Device specific ioctls should only be in their respective headers * The device specific ioctl range is from 0x40 to 0x9f. diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index 78abd819fd62..70f3b00b0681 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -1516,6 +1516,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) * 64K_D_2D on GFX12 is identical to 64K_D on GFX11. */ #define AMD_FMT_MOD_TILE_GFX9_64K_D 10 +#define AMD_FMT_MOD_TILE_GFX9_4K_D_X 22 #define AMD_FMT_MOD_TILE_GFX9_64K_S_X 25 #define AMD_FMT_MOD_TILE_GFX9_64K_D_X 26 #define AMD_FMT_MOD_TILE_GFX9_64K_R_X 27 diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h index 084fb529e1e9..a35b97b097bf 100644 --- a/include/uapi/drm/ivpu_accel.h +++ b/include/uapi/drm/ivpu_accel.h @@ -12,9 +12,6 @@ extern "C" { #endif -#define DRM_IVPU_DRIVER_MAJOR 1 -#define DRM_IVPU_DRIVER_MINOR 0 - #define DRM_IVPU_GET_PARAM 0x00 #define DRM_IVPU_SET_PARAM 0x01 #define DRM_IVPU_BO_CREATE 0x02 @@ -261,7 +258,7 @@ struct drm_ivpu_bo_info { /* drm_ivpu_submit engines */ #define DRM_IVPU_ENGINE_COMPUTE 0 -#define DRM_IVPU_ENGINE_COPY 1 +#define DRM_IVPU_ENGINE_COPY 1 /* Deprecated */ /** * struct drm_ivpu_submit - Submit commands to the VPU @@ -292,10 +289,6 @@ struct drm_ivpu_submit { * %DRM_IVPU_ENGINE_COMPUTE: * * Performs Deep Learning Neural Compute Inference Operations - * - * %DRM_IVPU_ENGINE_COPY: - * - * Performs memory copy operations to/from system memory allocated for VPU */ __u32 engine; diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 2377147b6af0..b916aab80dde 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -347,7 +347,10 @@ struct drm_msm_gem_madvise { * backwards compatibility as a "default" submitqueue */ -#define MSM_SUBMITQUEUE_FLAGS (0) +#define MSM_SUBMITQUEUE_ALLOW_PREEMPT 0x00000001 +#define MSM_SUBMITQUEUE_FLAGS ( \ + MSM_SUBMITQUEUE_ALLOW_PREEMPT | \ + 0) /* * The submitqueue priority should be between 0 and MSM_PARAM_PRIORITIES-1, diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h index 9f231d40a146..568724be6628 100644 --- a/include/uapi/drm/panfrost_drm.h +++ b/include/uapi/drm/panfrost_drm.h @@ -40,6 +40,7 @@ extern "C" { #define DRM_IOCTL_PANFROST_PERFCNT_DUMP DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_PERFCNT_DUMP, struct drm_panfrost_perfcnt_dump) #define PANFROST_JD_REQ_FS (1 << 0) +#define PANFROST_JD_REQ_CYCLE_COUNT (1 << 1) /** * struct drm_panfrost_submit - ioctl argument for submitting commands to the 3D * engine. @@ -172,6 +173,8 @@ enum drm_panfrost_param { DRM_PANFROST_PARAM_NR_CORE_GROUPS, DRM_PANFROST_PARAM_THREAD_TLS_ALLOC, DRM_PANFROST_PARAM_AFBC_FEATURES, + DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP, + DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY, }; struct drm_panfrost_get_param { diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h index e23a7f9b0eac..87c9cb555dd1 100644 --- a/include/uapi/drm/panthor_drm.h +++ b/include/uapi/drm/panthor_drm.h @@ -260,6 +260,14 @@ enum drm_panthor_dev_query_type { /** @DRM_PANTHOR_DEV_QUERY_CSIF_INFO: Query command-stream interface information. */ DRM_PANTHOR_DEV_QUERY_CSIF_INFO, + + /** @DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO: Query timestamp information. 
*/ + DRM_PANTHOR_DEV_QUERY_TIMESTAMP_INFO, + + /** + * @DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO: Query allowed group priorities information. + */ + DRM_PANTHOR_DEV_QUERY_GROUP_PRIORITIES_INFO, }; /** @@ -378,6 +386,42 @@ struct drm_panthor_csif_info { }; /** + * struct drm_panthor_timestamp_info - Timestamp information + * + * Structure grouping all queryable information relating to the GPU timestamp. + */ +struct drm_panthor_timestamp_info { + /** + * @timestamp_frequency: The frequency of the timestamp timer or 0 if + * unknown. + */ + __u64 timestamp_frequency; + + /** @current_timestamp: The current timestamp. */ + __u64 current_timestamp; + + /** @timestamp_offset: The offset of the timestamp timer. */ + __u64 timestamp_offset; +}; + +/** + * struct drm_panthor_group_priorities_info - Group priorities information + * + * Structure grouping all queryable information relating to the allowed group priorities. + */ +struct drm_panthor_group_priorities_info { + /** + * @allowed_mask: Bitmask of the allowed group priorities. + * + * Each bit represents a variant of the enum drm_panthor_group_priority. + */ + __u8 allowed_mask; + + /** @pad: Padding fields, MBZ. */ + __u8 pad[3]; +}; + +/** * struct drm_panthor_dev_query - Arguments passed to DRM_PANTHOR_IOCTL_DEV_QUERY */ struct drm_panthor_dev_query { @@ -698,6 +742,13 @@ enum drm_panthor_group_priority { * Requires CAP_SYS_NICE or DRM_MASTER. */ PANTHOR_GROUP_PRIORITY_HIGH, + + /** + * @PANTHOR_GROUP_PRIORITY_REALTIME: Realtime priority group. + * + * Requires CAP_SYS_NICE or DRM_MASTER. + */ + PANTHOR_GROUP_PRIORITY_REALTIME, }; /** diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h index 87fc5bb0a61e..2376c73abca1 100644 --- a/include/uapi/drm/v3d_drm.h +++ b/include/uapi/drm/v3d_drm.h @@ -290,6 +290,7 @@ enum drm_v3d_param { DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT, DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE, DRM_V3D_PARAM_MAX_PERF_COUNTERS, + DRM_V3D_PARAM_SUPPORTS_SUPER_PAGES, }; struct drm_v3d_get_param { diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h index b6fbe4988f2e..4a8a4a63e99c 100644 --- a/include/uapi/drm/xe_drm.h +++ b/include/uapi/drm/xe_drm.h @@ -512,7 +512,9 @@ struct drm_xe_query_gt_list { * containing the following in mask: * ``DSS_COMPUTE ff ff ff ff 00 00 00 00`` * means 32 DSS are available for compute. - * - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks + * - %DRM_XE_TOPO_L3_BANK - To query the mask of enabled L3 banks. This type + * may be omitted if the driver is unable to query the mask from the + * hardware. * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU) * available per Dual Sub Slices (DSS). For example a query response * containing the following in mask: @@ -1483,6 +1485,7 @@ struct drm_xe_oa_unit { /** @capabilities: OA capabilities bit-mask */ __u64 capabilities; #define DRM_XE_OA_CAPS_BASE (1 << 0) +#define DRM_XE_OA_CAPS_SYNCS (1 << 1) /** @oa_timestamp_freq: OA timestamp freq */ __u64 oa_timestamp_freq; @@ -1632,6 +1635,22 @@ enum drm_xe_oa_property_id { * to be disabled for the stream exec queue. */ DRM_XE_OA_PROPERTY_NO_PREEMPT, + + /** + * @DRM_XE_OA_PROPERTY_NUM_SYNCS: Number of syncs in the sync array + * specified in @DRM_XE_OA_PROPERTY_SYNCS + */ + DRM_XE_OA_PROPERTY_NUM_SYNCS, + + /** + * @DRM_XE_OA_PROPERTY_SYNCS: Pointer to struct @drm_xe_sync array + * with array size specified via @DRM_XE_OA_PROPERTY_NUM_SYNCS. OA + * configuration will wait till input fences signal. 
Output fences + * will signal after the new OA configuration takes effect. For + * @DRM_XE_SYNC_TYPE_USER_FENCE, @addr is a user pointer, similar + * to the VM bind case. + */ + DRM_XE_OA_PROPERTY_SYNCS, }; /** diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h index 6e25753015df..439132a819ea 100644 --- a/include/uapi/linux/batadv_packet.h +++ b/include/uapi/linux/batadv_packet.h @@ -9,6 +9,7 @@ #include <asm/byteorder.h> #include <linux/if_ether.h> +#include <linux/stddef.h> #include <linux/types.h> /** @@ -593,19 +594,6 @@ struct batadv_tvlv_gateway_data { }; /** - * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container - * @flags: translation table flags (see batadv_tt_data_flags) - * @ttvn: translation table version number - * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by - * one batadv_tvlv_tt_vlan_data object per announced vlan - */ -struct batadv_tvlv_tt_data { - __u8 flags; - __u8 ttvn; - __be16 num_vlan; -}; - -/** * struct batadv_tvlv_tt_vlan_data - vlan specific tt data propagated through * the tt tvlv container * @crc: crc32 checksum of the entries belonging to this vlan @@ -619,6 +607,21 @@ struct batadv_tvlv_tt_vlan_data { }; /** + * struct batadv_tvlv_tt_data - tt data propagated through the tt tvlv container + * @flags: translation table flags (see batadv_tt_data_flags) + * @ttvn: translation table version number + * @num_vlan: number of announced VLANs. In the TVLV this struct is followed by + * one batadv_tvlv_tt_vlan_data object per announced vlan + * @vlan_data: array of batadv_tvlv_tt_vlan_data objects + */ +struct batadv_tvlv_tt_data { + __u8 flags; + __u8 ttvn; + __be16 num_vlan; + struct batadv_tvlv_tt_vlan_data vlan_data[] __counted_by_be(num_vlan); +}; + +/** * struct batadv_tvlv_tt_change - translation table diff data * @flags: status indicators concerning the non-mesh client (see * batadv_tt_client_flags) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4a939c90dc2e..4162afc6b5d0 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1116,6 +1116,7 @@ enum bpf_attach_type { BPF_NETKIT_PRIMARY, BPF_NETKIT_PEER, BPF_TRACE_KPROBE_SESSION, + BPF_TRACE_UPROBE_SESSION, __MAX_BPF_ATTACH_TYPE }; @@ -1973,6 +1974,8 @@ union bpf_attr { * program. * Return * The SMP id of the processor running the program. + * Attributes + * __bpf_fastcall * * long bpf_skb_store_bytes(struct sk_buff *skb, u32 offset, const void *from, u32 len, u64 flags) * Description @@ -3104,10 +3107,6 @@ union bpf_attr { * with the **CONFIG_BPF_KPROBE_OVERRIDE** configuration * option, and in this case it only works on functions tagged with * **ALLOW_ERROR_INJECTION** in the kernel code. - * - * Also, the helper is only available for the architectures having - * the CONFIG_FUNCTION_ERROR_INJECTION option. As of this writing, - * x86 architecture is the only one to support this feature. * Return * 0 * @@ -5372,7 +5371,7 @@ union bpf_attr { * Currently, the **flags** must be 0. Currently, nr_loops is * limited to 1 << 23 (~8 million) loops. * - * long (\*callback_fn)(u32 index, void \*ctx); + * long (\*callback_fn)(u64 index, void \*ctx); * * where **index** is the current index in the loop. The index * is zero-indexed. 
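Since the bpf_loop() documentation above now spells the callback index as u64 rather than u32, a conforming program looks like the following minimal sketch (section name, identifiers and the summing logic are illustrative only):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct loop_ctx {
	__u64 sum;
};

/* matches the corrected prototype: long (*callback_fn)(u64 index, void *ctx) */
static long loop_body(__u64 index, void *ctx)
{
	struct loop_ctx *lc = ctx;

	lc->sum += index;	/* index runs from 0 to nr_loops - 1 */
	return 0;		/* 0 continues the loop, 1 breaks out */
}

SEC("tracepoint/syscalls/sys_enter_getpid")
int sum_indices(void *unused)
{
	struct loop_ctx lc = {};

	bpf_loop(10, loop_body, &lc, 0);	/* flags must be 0, nr_loops capped at 1 << 23 */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";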
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index cdf6ad872149..d3b222d7af24 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -1049,6 +1049,29 @@ struct btrfs_ioctl_encoded_io_args { #define BTRFS_ENCODED_IO_ENCRYPTION_NONE 0 #define BTRFS_ENCODED_IO_ENCRYPTION_TYPES 1 +/* + * Wait for subvolume cleaning process. This queries the kernel queue and it + * can change between the calls. + * + * - FOR_ONE - specify the subvolid + * - FOR_QUEUED - wait for all currently queued + * - COUNT - count number of queued + * - PEEK_FIRST - read which is the first in the queue (to be cleaned or being + * cleaned already), or 0 if the queue is empty + * - PEEK_LAST - read the last subvolid in the queue, or 0 if the queue is empty + */ +struct btrfs_ioctl_subvol_wait { + __u64 subvolid; + __u32 mode; + __u32 count; +}; + +#define BTRFS_SUBVOL_SYNC_WAIT_FOR_ONE (0) +#define BTRFS_SUBVOL_SYNC_WAIT_FOR_QUEUED (1) +#define BTRFS_SUBVOL_SYNC_COUNT (2) +#define BTRFS_SUBVOL_SYNC_PEEK_FIRST (3) +#define BTRFS_SUBVOL_SYNC_PEEK_LAST (4) + /* Error codes as returned by the kernel */ enum btrfs_err_code { BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1, @@ -1181,6 +1204,8 @@ enum btrfs_err_code { struct btrfs_ioctl_encoded_io_args) #define BTRFS_IOC_ENCODED_WRITE _IOW(BTRFS_IOCTL_MAGIC, 64, \ struct btrfs_ioctl_encoded_io_args) +#define BTRFS_IOC_SUBVOL_SYNC_WAIT _IOW(BTRFS_IOCTL_MAGIC, 65, \ + struct btrfs_ioctl_subvol_wait) #ifdef __cplusplus } diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h index 20a6c0fc149e..db05e0419972 100644 --- a/include/uapi/linux/cryptouser.h +++ b/include/uapi/linux/cryptouser.h @@ -64,6 +64,7 @@ enum crypto_attr_type_t { CRYPTOCFGA_STAT_AKCIPHER, /* No longer supported, do not use. */ CRYPTOCFGA_STAT_KPP, /* No longer supported, do not use. */ CRYPTOCFGA_STAT_ACOMP, /* No longer supported, do not use. */ + CRYPTOCFGA_REPORT_SIG, /* struct crypto_report_sig */ __CRYPTOCFGA_MAX #define CRYPTOCFGA_MAX (__CRYPTOCFGA_MAX - 1) @@ -207,6 +208,10 @@ struct crypto_report_acomp { char type[CRYPTO_MAX_NAME]; }; +struct crypto_report_sig { + char type[CRYPTO_MAX_NAME]; +}; + #define CRYPTO_REPORT_MAXSIZE (sizeof(struct crypto_user_alg) + \ sizeof(struct crypto_report_blkcipher)) diff --git a/include/uapi/linux/dpll.h b/include/uapi/linux/dpll.h index b0654ade7b7e..bf97d4b6d51f 100644 --- a/include/uapi/linux/dpll.h +++ b/include/uapi/linux/dpll.h @@ -79,6 +79,29 @@ enum dpll_lock_status_error { DPLL_LOCK_STATUS_ERROR_MAX = (__DPLL_LOCK_STATUS_ERROR_MAX - 1) }; +/* + * level of quality of a clock device. This mainly applies when the dpll + * lock-status is DPLL_LOCK_STATUS_HOLDOVER. The current list is defined + * according to the table 11-7 contained in ITU-T G.8264/Y.1364 document. One + * may extend this list freely by other ITU-T defined clock qualities, or + * different ones defined by another standardization body (for those, please + * use different prefix). 
+ */ +enum dpll_clock_quality_level { + DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRC = 1, + DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_A, + DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_SSU_B, + DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEC1, + DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_PRTC, + DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRTC, + DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EEEC, + DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRC, + + /* private: */ + __DPLL_CLOCK_QUALITY_LEVEL_MAX, + DPLL_CLOCK_QUALITY_LEVEL_MAX = (__DPLL_CLOCK_QUALITY_LEVEL_MAX - 1) +}; + #define DPLL_TEMP_DIVIDER 1000 /** @@ -180,6 +203,7 @@ enum dpll_a { DPLL_A_TEMP, DPLL_A_TYPE, DPLL_A_LOCK_STATUS_ERROR, + DPLL_A_CLOCK_QUALITY_LEVEL, __DPLL_A_MAX, DPLL_A_MAX = (__DPLL_A_MAX - 1) diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h index b9935988da5c..b44069d29cec 100644 --- a/include/uapi/linux/elf.h +++ b/include/uapi/linux/elf.h @@ -443,6 +443,7 @@ typedef struct elf64_shdr { #define NT_ARM_ZT 0x40d /* ARM SME ZT registers */ #define NT_ARM_FPMR 0x40e /* ARM floating point mode register */ #define NT_ARM_POE 0x40f /* ARM POE registers */ +#define NT_ARM_GCS 0x410 /* ARM GCS state */ #define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */ #define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */ #define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */ @@ -450,6 +451,7 @@ typedef struct elf64_shdr { #define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */ #define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */ #define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */ +#define NT_RISCV_TAGGED_ADDR_CTRL 0x902 /* RISC-V tagged address control (prctl()) */ #define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */ #define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */ #define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */ diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index c405ed63acfa..7e1b3820f91f 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -2526,12 +2526,19 @@ struct ethtool_link_settings { __u8 master_slave_state; __u8 rate_matching; __u32 reserved[7]; +#ifndef __KERNEL__ + /* Linux builds with -Wflex-array-member-not-at-end but does + * not use the "link_mode_masks" member. Leave it defined for + * userspace for now, and when userspace wants to start using + * -Wfamnae, we'll need a new solution. + */ __u32 link_mode_masks[]; /* layout of link_mode_masks fields: * __u32 map_supported[link_mode_masks_nwords]; * __u32 map_advertising[link_mode_masks_nwords]; * __u32 map_lp_advertising[link_mode_masks_nwords]; */ +#endif }; /** diff --git a/include/uapi/linux/f2fs.h b/include/uapi/linux/f2fs.h index 955d440be104..f7aaf8d23e20 100644 --- a/include/uapi/linux/f2fs.h +++ b/include/uapi/linux/f2fs.h @@ -43,6 +43,7 @@ #define F2FS_IOC_DECOMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 23) #define F2FS_IOC_COMPRESS_FILE _IO(F2FS_IOCTL_MAGIC, 24) #define F2FS_IOC_START_ATOMIC_REPLACE _IO(F2FS_IOCTL_MAGIC, 25) +#define F2FS_IOC_GET_DEV_ALIAS_FILE _IOR(F2FS_IOCTL_MAGIC, 26, __u32) /* * should be same as XFS_IOC_GOINGDOWN. 
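The new F2FS_IOC_GET_DEV_ALIAS_FILE above is encoded with _IOR(), so the kernel writes a __u32 back to the caller. A minimal userspace sketch of the calling convention follows; the meaning of the returned value is defined by f2fs and is only assumed here to identify the aliased device:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/f2fs.h>

int main(int argc, char **argv)
{
	__u32 dev_index;
	int fd;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	/* _IOR(F2FS_IOCTL_MAGIC, 26, __u32): the kernel fills in dev_index */
	if (ioctl(fd, F2FS_IOC_GET_DEV_ALIAS_FILE, &dev_index) == 0)
		printf("device alias: %u\n", dev_index);
	close(fd);
	return 0;
}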
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h index a37de58ca571..34f221d3a1b9 100644 --- a/include/uapi/linux/fanotify.h +++ b/include/uapi/linux/fanotify.h @@ -60,6 +60,7 @@ #define FAN_REPORT_DIR_FID 0x00000400 /* Report unique directory id */ #define FAN_REPORT_NAME 0x00000800 /* Report events with name */ #define FAN_REPORT_TARGET_FID 0x00001000 /* Report dirent target id */ +#define FAN_REPORT_FD_ERROR 0x00002000 /* event->fd can report error */ /* Convenience macro - FAN_REPORT_NAME requires FAN_REPORT_DIR_FID */ #define FAN_REPORT_DFID_NAME (FAN_REPORT_DIR_FID | FAN_REPORT_NAME) diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h index 87e2dec79fea..6e6907e63bfc 100644 --- a/include/uapi/linux/fcntl.h +++ b/include/uapi/linux/fcntl.h @@ -153,9 +153,6 @@ object identity and may not be usable with open_by_handle_at(2). */ #define AT_HANDLE_MNT_ID_UNIQUE 0x001 /* Return the u64 unique mount ID. */ - -#if defined(__KERNEL__) -#define AT_GETATTR_NOSEC 0x80000000 -#endif +#define AT_HANDLE_CONNECTABLE 0x002 /* Request a connectable file handle */ #endif /* _UAPI_LINUX_FCNTL_H */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index 6dc258993b17..2575e0cd9b48 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -377,6 +377,7 @@ enum { IFLA_GSO_IPV4_MAX_SIZE, IFLA_GRO_IPV4_MAX_SIZE, IFLA_DPLL_PIN, + IFLA_MAX_PACING_OFFLOAD_HORIZON, __IFLA_MAX }; @@ -1292,6 +1293,19 @@ enum netkit_mode { NETKIT_L3, }; +/* NETKIT_SCRUB_NONE leaves clearing skb->{mark,priority} up to + * the BPF program if attached. This also means the latter can + * consume the two fields if they were populated earlier. + * + * NETKIT_SCRUB_DEFAULT zeroes skb->{mark,priority} fields before + * invoking the attached BPF program when the peer device resides + * in a different network namespace. This is the default behavior. 
+ */ +enum netkit_scrub { + NETKIT_SCRUB_NONE, + NETKIT_SCRUB_DEFAULT, +}; + enum { IFLA_NETKIT_UNSPEC, IFLA_NETKIT_PEER_INFO, @@ -1299,6 +1313,8 @@ enum { IFLA_NETKIT_POLICY, IFLA_NETKIT_PEER_POLICY, IFLA_NETKIT_MODE, + IFLA_NETKIT_SCRUB, + IFLA_NETKIT_PEER_SCRUB, __IFLA_NETKIT_MAX, }; #define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) @@ -1942,6 +1958,7 @@ struct ifla_rmnet_flags { enum { IFLA_MCTP_UNSPEC, IFLA_MCTP_NET, + IFLA_MCTP_PHYS_BINDING, __IFLA_MCTP_MAX, }; diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index f2e0b2d50e6b..12886d4465e4 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -51,6 +51,7 @@ enum iio_chan_type { IIO_DELTA_VELOCITY, IIO_COLORTEMP, IIO_CHROMATICITY, + IIO_ATTENTION, }; enum iio_modifier { diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 1fe79e750470..aac9a4f8fa9a 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -200,6 +200,9 @@ enum io_uring_sqe_flags_bit { */ #define IORING_SETUP_NO_SQARRAY (1U << 16) +/* Use hybrid poll in iopoll process */ +#define IORING_SETUP_HYBRID_IOPOLL (1U << 17) + enum io_uring_op { IORING_OP_NOP, IORING_OP_READV, @@ -416,6 +419,9 @@ enum io_uring_msg_ring_flags { * IORING_NOP_INJECT_RESULT Inject result from sqe->result */ #define IORING_NOP_INJECT_RESULT (1U << 0) +#define IORING_NOP_FILE (1U << 1) +#define IORING_NOP_FIXED_FILE (1U << 2) +#define IORING_NOP_FIXED_BUFFER (1U << 3) /* * IO completion data structure (Completion Queue Entry) @@ -518,6 +524,7 @@ struct io_cqring_offsets { #define IORING_ENTER_EXT_ARG (1U << 3) #define IORING_ENTER_REGISTERED_RING (1U << 4) #define IORING_ENTER_ABS_TIMER (1U << 5) +#define IORING_ENTER_EXT_ARG_REG (1U << 6) /* * Passed in for io_uring_setup(2). Copied back with updated info on success @@ -612,6 +619,16 @@ enum io_uring_register_op { /* clone registered buffers from source ring to current ring */ IORING_REGISTER_CLONE_BUFFERS = 30, + /* send MSG_RING without having a ring */ + IORING_REGISTER_SEND_MSG_RING = 31, + + /* 32 reserved for zc rx */ + + /* resize CQ ring */ + IORING_REGISTER_RESIZE_RINGS = 33, + + IORING_REGISTER_MEM_REGION = 34, + /* this goes last */ IORING_REGISTER_LAST, @@ -632,6 +649,31 @@ struct io_uring_files_update { __aligned_u64 /* __s32 * */ fds; }; +enum { + /* initialise with user provided memory pointed by user_addr */ + IORING_MEM_REGION_TYPE_USER = 1, +}; + +struct io_uring_region_desc { + __u64 user_addr; + __u64 size; + __u32 flags; + __u32 id; + __u64 mmap_offset; + __u64 __resv[4]; +}; + +enum { + /* expose the region as registered wait arguments */ + IORING_MEM_REGION_REG_WAIT_ARG = 1, +}; + +struct io_uring_mem_region_reg { + __u64 region_uptr; /* struct io_uring_region_desc * */ + __u64 flags; + __u64 __resv[2]; +}; + /* * Register a fully sparse file space, rather than pass in an array of all * -1 file descriptors. 
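Tying the new region structs above together: a hedged sketch of registering user memory as a wait-argument region via IORING_REGISTER_MEM_REGION, issued through the raw io_uring_register(2) syscall (error handling is elided and nr_args is assumed to be 1 for this opcode):

#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static int register_wait_region(int ring_fd)
{
	/* backing store the kernel will expose as registered wait arguments */
	void *mem = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	struct io_uring_region_desc rd = {
		.user_addr = (__u64)(uintptr_t)mem,
		.size = 4096,
		.flags = IORING_MEM_REGION_TYPE_USER,	/* user-provided memory */
	};
	struct io_uring_mem_region_reg mr = {
		.region_uptr = (__u64)(uintptr_t)&rd,
		.flags = IORING_MEM_REGION_REG_WAIT_ARG, /* expose as wait args */
	};

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_MEM_REGION, &mr, 1);
}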
@@ -698,13 +740,17 @@ struct io_uring_clock_register { }; enum { - IORING_REGISTER_SRC_REGISTERED = 1, + IORING_REGISTER_SRC_REGISTERED = (1U << 0), + IORING_REGISTER_DST_REPLACE = (1U << 1), }; struct io_uring_clone_buffers { __u32 src_fd; __u32 flags; - __u32 pad[6]; + __u32 src_off; + __u32 dst_off; + __u32 nr; + __u32 pad[3]; }; struct io_uring_buf { @@ -768,12 +814,40 @@ struct io_uring_buf_status { __u32 resv[8]; }; +enum io_uring_napi_op { + /* register/unregister backward compatible opcode */ + IO_URING_NAPI_REGISTER_OP = 0, + + /* opcodes to update napi_list when static tracking is used */ + IO_URING_NAPI_STATIC_ADD_ID = 1, + IO_URING_NAPI_STATIC_DEL_ID = 2 +}; + +enum io_uring_napi_tracking_strategy { + /* value must be 0 for backward compatibility */ + IO_URING_NAPI_TRACKING_DYNAMIC = 0, + IO_URING_NAPI_TRACKING_STATIC = 1, + IO_URING_NAPI_TRACKING_INACTIVE = 255 +}; + /* argument for IORING_(UN)REGISTER_NAPI */ struct io_uring_napi { __u32 busy_poll_to; __u8 prefer_busy_poll; - __u8 pad[3]; - __u64 resv; + + /* an io_uring_napi_op value */ + __u8 opcode; + __u8 pad[2]; + + /* + * for IO_URING_NAPI_REGISTER_OP, it is an + * io_uring_napi_tracking_strategy value. + * + * for IO_URING_NAPI_STATIC_ADD_ID/IO_URING_NAPI_STATIC_DEL_ID + * it is the napi id to add/del from napi_list. + */ + __u32 op_param; + __u32 resv; }; /* @@ -795,6 +869,29 @@ enum io_uring_register_restriction_op { IORING_RESTRICTION_LAST }; +enum { + IORING_REG_WAIT_TS = (1U << 0), +}; + +/* + * Argument for io_uring_enter(2) with + * IORING_GETEVENTS | IORING_ENTER_EXT_ARG_REG set, where the actual argument + * is an index into a previously registered fixed wait region described by + * the below structure. + */ +struct io_uring_reg_wait { + struct __kernel_timespec ts; + __u32 min_wait_usec; + __u32 flags; + __u64 sigmask; + __u32 sigmask_sz; + __u32 pad[3]; + __u64 pad2[2]; +}; + +/* + * Argument for io_uring_enter(2) with IORING_GETEVENTS | IORING_ENTER_EXT_ARG + */ struct io_uring_getevents_arg { __u64 sigmask; __u32 sigmask_sz; diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index 72010f71c5e4..059b6537f2b7 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -51,6 +51,10 @@ enum { IOMMUFD_CMD_HWPT_GET_DIRTY_BITMAP = 0x8c, IOMMUFD_CMD_HWPT_INVALIDATE = 0x8d, IOMMUFD_CMD_FAULT_QUEUE_ALLOC = 0x8e, + IOMMUFD_CMD_IOAS_MAP_FILE = 0x8f, + IOMMUFD_CMD_VIOMMU_ALLOC = 0x90, + IOMMUFD_CMD_VDEVICE_ALLOC = 0x91, + IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92, }; /** @@ -214,6 +218,30 @@ struct iommu_ioas_map { #define IOMMU_IOAS_MAP _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP) /** + * struct iommu_ioas_map_file - ioctl(IOMMU_IOAS_MAP_FILE) + * @size: sizeof(struct iommu_ioas_map_file) + * @flags: same as for iommu_ioas_map + * @ioas_id: same as for iommu_ioas_map + * @fd: the memfd to map + * @start: byte offset from start of file to map from + * @length: same as for iommu_ioas_map + * @iova: same as for iommu_ioas_map + * + * Set an IOVA mapping from a memfd file. All other arguments and semantics + * match those of IOMMU_IOAS_MAP.
+ */ +struct iommu_ioas_map_file { + __u32 size; + __u32 flags; + __u32 ioas_id; + __s32 fd; + __aligned_u64 start; + __aligned_u64 length; + __aligned_u64 iova; +}; +#define IOMMU_IOAS_MAP_FILE _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_MAP_FILE) + +/** * struct iommu_ioas_copy - ioctl(IOMMU_IOAS_COPY) * @size: sizeof(struct iommu_ioas_copy) * @flags: Combination of enum iommufd_ioas_map_flags @@ -359,11 +387,19 @@ struct iommu_vfio_ioas { * enforced on device attachment * @IOMMU_HWPT_FAULT_ID_VALID: The fault_id field of hwpt allocation data is * valid. + * @IOMMU_HWPT_ALLOC_PASID: Requests a domain that can be used with PASID. The + * domain can be attached to any PASID on the device. + * Any domain attached to the non-PASID part of the + * device must also be flagged, otherwise attaching a + * PASID will be blocked. + * If the IOMMU does not support PASID it will return + * an error (-EOPNOTSUPP). */ enum iommufd_hwpt_alloc_flags { IOMMU_HWPT_ALLOC_NEST_PARENT = 1 << 0, IOMMU_HWPT_ALLOC_DIRTY_TRACKING = 1 << 1, IOMMU_HWPT_FAULT_ID_VALID = 1 << 2, + IOMMU_HWPT_ALLOC_PASID = 1 << 3, }; /** @@ -395,13 +431,35 @@ struct iommu_hwpt_vtd_s1 { }; /** + * struct iommu_hwpt_arm_smmuv3 - ARM SMMUv3 nested STE + * (IOMMU_HWPT_DATA_ARM_SMMUV3) + * + * @ste: The first two double words of the user space Stream Table Entry for + * the translation. Must be little-endian. + * Allowed fields: (Refer to "5.2 Stream Table Entry" in SMMUv3 HW Spec) + * - word-0: V, Cfg, S1Fmt, S1ContextPtr, S1CDMax + * - word-1: EATS, S1DSS, S1CIR, S1COR, S1CSH, S1STALLD + * + * -EIO will be returned if @ste is not legal or contains any non-allowed field. + * Cfg can be used to select a S1, Bypass or Abort configuration. A Bypass + * nested domain will translate the same as the nesting parent. The S1 will + * install a Context Descriptor Table pointing at userspace memory translated + * by the nesting parent. + */ +struct iommu_hwpt_arm_smmuv3 { + __aligned_le64 ste[2]; +}; + +/** * enum iommu_hwpt_data_type - IOMMU HWPT Data Type * @IOMMU_HWPT_DATA_NONE: no data * @IOMMU_HWPT_DATA_VTD_S1: Intel VT-d stage-1 page table + * @IOMMU_HWPT_DATA_ARM_SMMUV3: ARM SMMUv3 Context Descriptor Table */ enum iommu_hwpt_data_type { IOMMU_HWPT_DATA_NONE = 0, IOMMU_HWPT_DATA_VTD_S1 = 1, + IOMMU_HWPT_DATA_ARM_SMMUV3 = 2, }; /** @@ -409,7 +467,7 @@ enum iommu_hwpt_data_type { * @size: sizeof(struct iommu_hwpt_alloc) * @flags: Combination of enum iommufd_hwpt_alloc_flags * @dev_id: The device to allocate this HWPT for - * @pt_id: The IOAS or HWPT to connect this HWPT to + * @pt_id: The IOAS or HWPT or vIOMMU to connect this HWPT to * @out_hwpt_id: The ID of the new HWPT * @__reserved: Must be 0 * @data_type: One of enum iommu_hwpt_data_type @@ -428,11 +486,13 @@ enum iommu_hwpt_data_type { * IOMMU_HWPT_DATA_NONE. The HWPT can be allocated as a parent HWPT for a * nesting configuration by passing IOMMU_HWPT_ALLOC_NEST_PARENT via @flags. * - * A user-managed nested HWPT will be created from a given parent HWPT via - * @pt_id, in which the parent HWPT must be allocated previously via the - * same ioctl from a given IOAS (@pt_id). In this case, the @data_type - * must be set to a pre-defined type corresponding to an I/O page table - * type supported by the underlying IOMMU hardware. + * A user-managed nested HWPT will be created from a given vIOMMU (wrapping a + * parent HWPT) or a parent HWPT via @pt_id, in which the parent HWPT must be + * allocated previously via the same ioctl from a given IOAS (@pt_id).
In this
+ * case, the @data_type must be set to a pre-defined type corresponding to an
+ * I/O page table type supported by the underlying IOMMU hardware. The device
+ * via @dev_id and the vIOMMU via @pt_id must be associated with the same IOMMU
+ * instance.
 *
 * If the @data_type is set to IOMMU_HWPT_DATA_NONE, @data_len and
 * @data_uptr should be zero. Otherwise, both @data_len and @data_uptr
@@ -485,14 +545,49 @@ struct iommu_hw_info_vtd {
};
/**
+ * struct iommu_hw_info_arm_smmuv3 - ARM SMMUv3 hardware information
+ * (IOMMU_HW_INFO_TYPE_ARM_SMMUV3)
+ *
+ * @flags: Must be set to 0
+ * @__reserved: Must be 0
+ * @idr: Implemented features for ARM SMMU Non-secure programming interface
+ * @iidr: Information about the implementation and implementer of ARM SMMU,
+ * and architecture version supported
+ * @aidr: ARM SMMU architecture version
+ *
+ * For the details of @idr, @iidr and @aidr, please refer to the chapters
+ * from 6.3.1 to 6.3.6 in the SMMUv3 Spec.
+ *
+ * User space should read the underlying ARM SMMUv3 hardware information for
+ * the list of supported features.
+ *
+ * Note that these values reflect the raw HW capability, without any insight
+ * into whether the required kernel driver support is present. Bits may be set
+ * indicating the HW has functionality that is lacking kernel software support,
+ * such as BTM. If a VMM is using this information to construct emulated copies
+ * of these registers it should only forward bits that it knows it can support.
+ *
+ * In the future, presence of required kernel support will be indicated in
+ * flags.
+ */
+struct iommu_hw_info_arm_smmuv3 {
+ __u32 flags;
+ __u32 __reserved;
+ __u32 idr[6];
+ __u32 iidr;
+ __u32 aidr;
+};
+
+/**
 * enum iommu_hw_info_type - IOMMU Hardware Info Types
 * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
 * info
 * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
+ * @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type
 */
enum iommu_hw_info_type {
 IOMMU_HW_INFO_TYPE_NONE = 0,
 IOMMU_HW_INFO_TYPE_INTEL_VTD = 1,
+ IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2,
};
/**
@@ -627,9 +722,11 @@ struct iommu_hwpt_get_dirty_bitmap {
 * enum iommu_hwpt_invalidate_data_type - IOMMU HWPT Cache Invalidation
 * Data Type
 * @IOMMU_HWPT_INVALIDATE_DATA_VTD_S1: Invalidation data for VTD_S1
+ * @IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3: Invalidation data for ARM SMMUv3
 */
enum iommu_hwpt_invalidate_data_type {
 IOMMU_HWPT_INVALIDATE_DATA_VTD_S1 = 0,
+ IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3 = 1,
};
/**
@@ -669,9 +766,31 @@ struct iommu_hwpt_vtd_s1_invalidate {
};
/**
+ * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
+ * (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
+ * @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
+ * Must be little-endian.
+ *
+ * Supported command list, only when passing in a vIOMMU via @hwpt_id:
+ * CMDQ_OP_TLBI_NSNH_ALL
+ * CMDQ_OP_TLBI_NH_VA
+ * CMDQ_OP_TLBI_NH_VAA
+ * CMDQ_OP_TLBI_NH_ALL
+ * CMDQ_OP_TLBI_NH_ASID
+ * CMDQ_OP_ATC_INV
+ * CMDQ_OP_CFGI_CD
+ * CMDQ_OP_CFGI_CD_ALL
+ *
+ * -EIO will be returned if the command is not supported.
+ */
+struct iommu_viommu_arm_smmuv3_invalidate {
+ __aligned_le64 cmd[2];
+};
+
+/**
 * struct iommu_hwpt_invalidate - ioctl(IOMMU_HWPT_INVALIDATE)
 * @size: sizeof(struct iommu_hwpt_invalidate)
- * @hwpt_id: ID of a nested HWPT for cache invalidation
+ * @hwpt_id: ID of a nested HWPT or a vIOMMU, for cache invalidation
 * @data_uptr: User pointer to an array of driver-specific cache invalidation
 * data.
 * @data_type: One of enum iommu_hwpt_invalidate_data_type, defining the data
@@ -682,8 +801,11 @@ struct iommu_hwpt_vtd_s1_invalidate {
 * Output the number of requests successfully handled by kernel.
 * @__reserved: Must be 0.
 *
- * Invalidate the iommu cache for user-managed page table. Modifications on a
- * user-managed page table should be followed by this operation to sync cache.
+ * Invalidate iommu cache for user-managed page table or vIOMMU. Modifications
+ * on a user-managed page table should be followed by this operation, if a HWPT
+ * is passed in via @hwpt_id. Other caches, such as the device cache or the
+ * descriptor cache, can be flushed if a vIOMMU is passed in via the @hwpt_id
+ * field.
+ *
 * Each ioctl can support one or more cache invalidation requests in the array
 * that has a total size of @entry_len * @entry_num.
 *
@@ -797,4 +919,88 @@ struct iommu_fault_alloc {
 __u32 out_fault_fd;
};
#define IOMMU_FAULT_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_FAULT_QUEUE_ALLOC)
+
+/**
+ * enum iommu_viommu_type - Virtual IOMMU Type
+ * @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use
+ * @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type
+ */
+enum iommu_viommu_type {
+ IOMMU_VIOMMU_TYPE_DEFAULT = 0,
+ IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1,
+};
+
+/**
+ * struct iommu_viommu_alloc - ioctl(IOMMU_VIOMMU_ALLOC)
+ * @size: sizeof(struct iommu_viommu_alloc)
+ * @flags: Must be 0
+ * @type: Type of the virtual IOMMU. Must be defined in enum iommu_viommu_type
+ * @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU
+ * @hwpt_id: ID of a nesting parent HWPT to associate to
+ * @out_viommu_id: Output virtual IOMMU ID for the allocated object
+ *
+ * Allocate a virtual IOMMU object, representing the underlying physical IOMMU's
+ * virtualization support that is a security-isolated slice of the real IOMMU HW
+ * that is unique to a specific VM. Operations global to the IOMMU are connected
+ * to the vIOMMU, such as:
+ * - Security namespace for guest owned ID, e.g. guest-controlled cache tags
+ * - Non-device-affiliated event reporting, e.g. invalidation queue errors
+ * - Access to a sharable nesting parent pagetable across physical IOMMUs
+ * - Virtualization of various platform IDs, e.g. RIDs and others
+ * - Delivery of paravirtualized invalidation
+ * - Direct assigned invalidation queues
+ * - Direct assigned interrupts
+ */
+struct iommu_viommu_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 type;
+ __u32 dev_id;
+ __u32 hwpt_id;
+ __u32 out_viommu_id;
+};
+#define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC)
+
+/**
+ * struct iommu_vdevice_alloc - ioctl(IOMMU_VDEVICE_ALLOC)
+ * @size: sizeof(struct iommu_vdevice_alloc)
+ * @viommu_id: vIOMMU ID to associate with the virtual device
+ * @dev_id: The physical device to allocate a virtual instance on the vIOMMU
+ * @out_vdevice_id: Object handle for the vDevice. Pass to IOMMU_DESTROY
+ * @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID
+ * of AMD IOMMU, and vRID of a nested Intel VT-d to a Context Table
+ *
+ * Allocate a virtual device instance (for a physical device) against a vIOMMU.
+ * This instance holds the device's information (related to its vIOMMU) in a VM.
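
The allocation order these two objects imply, sketched for the SMMUv3 case: a vIOMMU wraps an existing nesting parent HWPT, then a vDevice records the device's virtual ID (its vSID here) against that vIOMMU. All input ids are assumed to come from earlier IOMMUFD ioctls; struct iommu_vdevice_alloc is defined just below.

#include <linux/iommufd.h>
#include <string.h>
#include <sys/ioctl.h>

static int viommu_bind_vdevice(int iommufd, __u32 dev_id,
			       __u32 nesting_parent_hwpt_id, __u64 vsid,
			       __u32 *viommu_id, __u32 *vdevice_id)
{
	struct iommu_viommu_alloc valloc;
	struct iommu_vdevice_alloc vdev;

	memset(&valloc, 0, sizeof(valloc));
	valloc.size = sizeof(valloc);
	valloc.type = IOMMU_VIOMMU_TYPE_ARM_SMMUV3;
	valloc.dev_id = dev_id;
	valloc.hwpt_id = nesting_parent_hwpt_id;
	if (ioctl(iommufd, IOMMU_VIOMMU_ALLOC, &valloc))
		return -1;
	*viommu_id = valloc.out_viommu_id;

	memset(&vdev, 0, sizeof(vdev));
	vdev.size = sizeof(vdev);
	vdev.viommu_id = *viommu_id;
	vdev.dev_id = dev_id;
	vdev.virt_id = vsid;	/* guest-visible stream ID */
	if (ioctl(iommufd, IOMMU_VDEVICE_ALLOC, &vdev))
		return -1;
	*vdevice_id = vdev.out_vdevice_id;
	return 0;
}
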
+ */
+struct iommu_vdevice_alloc {
+ __u32 size;
+ __u32 viommu_id;
+ __u32 dev_id;
+ __u32 out_vdevice_id;
+ __aligned_u64 virt_id;
+};
+#define IOMMU_VDEVICE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VDEVICE_ALLOC)
+
+/**
+ * struct iommu_ioas_change_process - ioctl(IOMMU_IOAS_CHANGE_PROCESS)
+ * @size: sizeof(struct iommu_ioas_change_process)
+ * @__reserved: Must be 0
+ *
+ * This transfers pinned memory counts for every memory map in every IOAS
+ * in the context to the current process. This only supports maps created
+ * with IOMMU_IOAS_MAP_FILE, and returns EINVAL if other maps are present.
+ * If the ioctl returns a failure status, then nothing is changed.
+ *
+ * This API is useful for transferring operation of a device from one process
+ * to another, such as during userland live update.
+ */
+struct iommu_ioas_change_process {
+ __u32 size;
+ __u32 __reserved;
+};
+
+#define IOMMU_IOAS_CHANGE_PROCESS \
+ _IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_CHANGE_PROCESS)
+
#endif
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 717307d6b5b7..fa9f9846b88e 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -609,6 +609,7 @@ struct kfd_ioctl_smi_events_args {
 * migrate_update: GPU page fault is recovered by 'M' for migrate, 'U' for update
 * rw: 'W' for write page fault, 'R' for read page fault
 * rescheduled: 'R' if the queue restore failed and rescheduled to try again
+ * error_code: migrate failure error code, 0 if no error
 */
#define KFD_EVENT_FMT_UPDATE_GPU_RESET(reset_seq_num, reset_cause)\
 "%x %s\n", (reset_seq_num), (reset_cause)
@@ -630,9 +631,9 @@ struct kfd_ioctl_smi_events_args {
 "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", (ns), (pid), (start), (size),\
 (from), (to), (prefetch_loc), (preferred_loc), (migrate_trigger)
-#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger)\
- "%lld -%d @%lx(%lx) %x->%x %d\n", (ns), (pid), (start), (size),\
- (from), (to), (migrate_trigger)
+#define KFD_EVENT_FMT_MIGRATE_END(ns, pid, start, size, from, to, migrate_trigger, error_code) \
+ "%lld -%d @%lx(%lx) %x->%x %d %d\n", (ns), (pid), (start), (size),\
+ (from), (to), (migrate_trigger), (error_code)
#define KFD_EVENT_FMT_QUEUE_EVICTION(ns, pid, node, evict_trigger)\
 "%lld -%d %x %d\n", (ns), (pid), (node), (evict_trigger)
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
index 5e8d28617efa..859b8e91d4d3 100644
--- a/include/uapi/linux/kfd_sysfs.h
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -60,7 +60,8 @@
#define HSA_CAP_FLAGS_COHERENTHOSTACCESS 0x10000000
#define HSA_CAP_TRAP_DEBUG_FIRMWARE_SUPPORTED 0x20000000
#define HSA_CAP_TRAP_DEBUG_PRECISE_ALU_OPERATIONS_SUPPORTED 0x40000000
-#define HSA_CAP_RESERVED 0x800f8000
+#define HSA_CAP_PER_QUEUE_RESET_SUPPORTED 0x80000000
+#define HSA_CAP_RESERVED 0x000f8000
/* debug_prop bits in node properties */
#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 637efc055145..502ea63b5d2e 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1158,7 +1158,15 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
 KVM_DEV_TYPE_RISCV_AIA,
#define KVM_DEV_TYPE_RISCV_AIA KVM_DEV_TYPE_RISCV_AIA
+ KVM_DEV_TYPE_LOONGARCH_IPI,
+#define KVM_DEV_TYPE_LOONGARCH_IPI KVM_DEV_TYPE_LOONGARCH_IPI
+ KVM_DEV_TYPE_LOONGARCH_EIOINTC,
+#define KVM_DEV_TYPE_LOONGARCH_EIOINTC KVM_DEV_TYPE_LOONGARCH_EIOINTC
+ KVM_DEV_TYPE_LOONGARCH_PCHPIC,
+#define
KVM_DEV_TYPE_LOONGARCH_PCHPIC KVM_DEV_TYPE_LOONGARCH_PCHPIC + KVM_DEV_TYPE_MAX, + }; struct kvm_vfio_spapr_tce { diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h index d4c1d991014b..ff62056feed5 100644 --- a/include/uapi/linux/media-bus-format.h +++ b/include/uapi/linux/media-bus-format.h @@ -34,7 +34,7 @@ #define MEDIA_BUS_FMT_FIXED 0x0001 -/* RGB - next is 0x1026 */ +/* RGB - next is 0x1028 */ #define MEDIA_BUS_FMT_RGB444_1X12 0x1016 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002 @@ -68,6 +68,8 @@ #define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d #define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f #define MEDIA_BUS_FMT_RGB101010_1X30 0x1018 +#define MEDIA_BUS_FMT_RGB101010_1X7X5_SPWG 0x1026 +#define MEDIA_BUS_FMT_RGB101010_1X7X5_JEIDA 0x1027 #define MEDIA_BUS_FMT_RGB666_1X36_CPADLO 0x1020 #define MEDIA_BUS_FMT_RGB888_1X36_CPADLO 0x1021 #define MEDIA_BUS_FMT_RGB121212_1X36 0x1019 diff --git a/include/uapi/linux/media/raspberrypi/pisp_fe_config.h b/include/uapi/linux/media/raspberrypi/pisp_fe_config.h new file mode 100644 index 000000000000..77237460a3b5 --- /dev/null +++ b/include/uapi/linux/media/raspberrypi/pisp_fe_config.h @@ -0,0 +1,273 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * RP1 PiSP Front End Driver Configuration structures + * + * Copyright (C) 2021 - Raspberry Pi Ltd. + * + */ +#ifndef _UAPI_PISP_FE_CONFIG_ +#define _UAPI_PISP_FE_CONFIG_ + +#include <linux/types.h> + +#include "pisp_common.h" +#include "pisp_fe_statistics.h" + +#define PISP_FE_NUM_OUTPUTS 2 + +enum pisp_fe_enable { + PISP_FE_ENABLE_INPUT = 0x000001, + PISP_FE_ENABLE_DECOMPRESS = 0x000002, + PISP_FE_ENABLE_DECOMPAND = 0x000004, + PISP_FE_ENABLE_BLA = 0x000008, + PISP_FE_ENABLE_DPC = 0x000010, + PISP_FE_ENABLE_STATS_CROP = 0x000020, + PISP_FE_ENABLE_DECIMATE = 0x000040, + PISP_FE_ENABLE_BLC = 0x000080, + PISP_FE_ENABLE_CDAF_STATS = 0x000100, + PISP_FE_ENABLE_AWB_STATS = 0x000200, + PISP_FE_ENABLE_RGBY = 0x000400, + PISP_FE_ENABLE_LSC = 0x000800, + PISP_FE_ENABLE_AGC_STATS = 0x001000, + PISP_FE_ENABLE_CROP0 = 0x010000, + PISP_FE_ENABLE_DOWNSCALE0 = 0x020000, + PISP_FE_ENABLE_COMPRESS0 = 0x040000, + PISP_FE_ENABLE_OUTPUT0 = 0x080000, + PISP_FE_ENABLE_CROP1 = 0x100000, + PISP_FE_ENABLE_DOWNSCALE1 = 0x200000, + PISP_FE_ENABLE_COMPRESS1 = 0x400000, + PISP_FE_ENABLE_OUTPUT1 = 0x800000 +}; + +#define PISP_FE_ENABLE_CROP(i) (PISP_FE_ENABLE_CROP0 << (4 * (i))) +#define PISP_FE_ENABLE_DOWNSCALE(i) (PISP_FE_ENABLE_DOWNSCALE0 << (4 * (i))) +#define PISP_FE_ENABLE_COMPRESS(i) (PISP_FE_ENABLE_COMPRESS0 << (4 * (i))) +#define PISP_FE_ENABLE_OUTPUT(i) (PISP_FE_ENABLE_OUTPUT0 << (4 * (i))) + +/* + * We use the enable flags to show when blocks are "dirty", but we need some + * extra ones too. 
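
Each output branch owns one nibble of the enable word, so the per-branch helpers above can be checked at compile time; a small sanity check, assuming this header is included:

/* Branch-1 helpers must land on the *1 bits (one nibble up). */
_Static_assert(PISP_FE_ENABLE_CROP(1) == PISP_FE_ENABLE_CROP1,
	       "crop enable bit mismatch");
_Static_assert(PISP_FE_ENABLE_OUTPUT(1) == PISP_FE_ENABLE_OUTPUT1,
	       "output enable bit mismatch");
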
+ */ +enum pisp_fe_dirty { + PISP_FE_DIRTY_GLOBAL = 0x0001, + PISP_FE_DIRTY_FLOATING = 0x0002, + PISP_FE_DIRTY_OUTPUT_AXI = 0x0004 +}; + +struct pisp_fe_global_config { + __u32 enables; + __u8 bayer_order; + __u8 pad[3]; +} __attribute__((packed)); + +struct pisp_fe_input_axi_config { + /* burst length minus one, in the range 0..15; OR'd with flags */ + __u8 maxlen_flags; + /* { prot[2:0], cache[3:0] } fields */ + __u8 cache_prot; + /* QoS (only 4 LS bits are used) */ + __u16 qos; +} __attribute__((packed)); + +struct pisp_fe_output_axi_config { + /* burst length minus one, in the range 0..15; OR'd with flags */ + __u8 maxlen_flags; + /* { prot[2:0], cache[3:0] } fields */ + __u8 cache_prot; + /* QoS (4 bitfields of 4 bits each for different panic levels) */ + __u16 qos; + /* For Panic mode: Output FIFO panic threshold */ + __u16 thresh; + /* For Panic mode: Output FIFO statistics throttle threshold */ + __u16 throttle; +} __attribute__((packed)); + +struct pisp_fe_input_config { + __u8 streaming; + __u8 pad[3]; + struct pisp_image_format_config format; + struct pisp_fe_input_axi_config axi; + /* Extra cycles delay before issuing each burst request */ + __u8 holdoff; + __u8 pad2[3]; +} __attribute__((packed)); + +struct pisp_fe_output_config { + struct pisp_image_format_config format; + __u16 ilines; + __u8 pad[2]; +} __attribute__((packed)); + +struct pisp_fe_input_buffer_config { + __u32 addr_lo; + __u32 addr_hi; + __u16 frame_id; + __u16 pad; +} __attribute__((packed)); + +#define PISP_FE_DECOMPAND_LUT_SIZE 65 + +struct pisp_fe_decompand_config { + __u16 lut[PISP_FE_DECOMPAND_LUT_SIZE]; + __u16 pad; +} __attribute__((packed)); + +struct pisp_fe_dpc_config { + __u8 coeff_level; + __u8 coeff_range; + __u8 coeff_range2; +#define PISP_FE_DPC_FLAG_FOLDBACK 1 +#define PISP_FE_DPC_FLAG_VFLAG 2 + __u8 flags; +} __attribute__((packed)); + +#define PISP_FE_LSC_LUT_SIZE 16 + +struct pisp_fe_lsc_config { + __u8 shift; + __u8 pad0; + __u16 scale; + __u16 centre_x; + __u16 centre_y; + __u16 lut[PISP_FE_LSC_LUT_SIZE]; +} __attribute__((packed)); + +struct pisp_fe_rgby_config { + __u16 gain_r; + __u16 gain_g; + __u16 gain_b; + __u8 maxflag; + __u8 pad; +} __attribute__((packed)); + +struct pisp_fe_agc_stats_config { + __u16 offset_x; + __u16 offset_y; + __u16 size_x; + __u16 size_y; + /* each weight only 4 bits */ + __u8 weights[PISP_AGC_STATS_NUM_ZONES / 2]; + __u16 row_offset_x; + __u16 row_offset_y; + __u16 row_size_x; + __u16 row_size_y; + __u8 row_shift; + __u8 float_shift; + __u8 pad1[2]; +} __attribute__((packed)); + +struct pisp_fe_awb_stats_config { + __u16 offset_x; + __u16 offset_y; + __u16 size_x; + __u16 size_y; + __u8 shift; + __u8 pad[3]; + __u16 r_lo; + __u16 r_hi; + __u16 g_lo; + __u16 g_hi; + __u16 b_lo; + __u16 b_hi; +} __attribute__((packed)); + +struct pisp_fe_floating_stats_region { + __u16 offset_x; + __u16 offset_y; + __u16 size_x; + __u16 size_y; +} __attribute__((packed)); + +struct pisp_fe_floating_stats_config { + struct pisp_fe_floating_stats_region + regions[PISP_FLOATING_STATS_NUM_ZONES]; +} __attribute__((packed)); + +#define PISP_FE_CDAF_NUM_WEIGHTS 8 + +struct pisp_fe_cdaf_stats_config { + __u16 noise_constant; + __u16 noise_slope; + __u16 offset_x; + __u16 offset_y; + __u16 size_x; + __u16 size_y; + __u16 skip_x; + __u16 skip_y; + __u32 mode; +} __attribute__((packed)); + +struct pisp_fe_stats_buffer_config { + __u32 addr_lo; + __u32 addr_hi; +} __attribute__((packed)); + +struct pisp_fe_crop_config { + __u16 offset_x; + __u16 offset_y; + __u16 width; + __u16 height; 
+} __attribute__((packed)); + +enum pisp_fe_downscale_flags { + /* downscale the four Bayer components independently... */ + DOWNSCALE_BAYER = 1, + /* ...without trying to preserve their spatial relationship */ + DOWNSCALE_BIN = 2, +}; + +struct pisp_fe_downscale_config { + __u8 xin; + __u8 xout; + __u8 yin; + __u8 yout; + __u8 flags; /* enum pisp_fe_downscale_flags */ + __u8 pad[3]; + __u16 output_width; + __u16 output_height; +} __attribute__((packed)); + +struct pisp_fe_output_buffer_config { + __u32 addr_lo; + __u32 addr_hi; +} __attribute__((packed)); + +/* Each of the two output channels/branches: */ +struct pisp_fe_output_branch_config { + struct pisp_fe_crop_config crop; + struct pisp_fe_downscale_config downscale; + struct pisp_compress_config compress; + struct pisp_fe_output_config output; + __u32 pad; +} __attribute__((packed)); + +/* And finally one to rule them all: */ +struct pisp_fe_config { + /* I/O configuration: */ + struct pisp_fe_stats_buffer_config stats_buffer; + struct pisp_fe_output_buffer_config output_buffer[PISP_FE_NUM_OUTPUTS]; + struct pisp_fe_input_buffer_config input_buffer; + /* processing configuration: */ + struct pisp_fe_global_config global; + struct pisp_fe_input_config input; + struct pisp_decompress_config decompress; + struct pisp_fe_decompand_config decompand; + struct pisp_bla_config bla; + struct pisp_fe_dpc_config dpc; + struct pisp_fe_crop_config stats_crop; + __u32 spare1; /* placeholder for future decimate configuration */ + struct pisp_bla_config blc; + struct pisp_fe_rgby_config rgby; + struct pisp_fe_lsc_config lsc; + struct pisp_fe_agc_stats_config agc_stats; + struct pisp_fe_awb_stats_config awb_stats; + struct pisp_fe_cdaf_stats_config cdaf_stats; + struct pisp_fe_floating_stats_config floating_stats; + struct pisp_fe_output_axi_config output_axi; + struct pisp_fe_output_branch_config ch[PISP_FE_NUM_OUTPUTS]; + /* non-register fields: */ + __u32 dirty_flags; /* these use pisp_fe_enable */ + __u32 dirty_flags_extra; /* these use pisp_fe_dirty */ +} __attribute__((packed)); + +#endif /* _UAPI_PISP_FE_CONFIG_ */ diff --git a/include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h b/include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h new file mode 100644 index 000000000000..a7d42985aee8 --- /dev/null +++ b/include/uapi/linux/media/raspberrypi/pisp_fe_statistics.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * RP1 PiSP Front End statistics definitions + * + * Copyright (C) 2021 - Raspberry Pi Ltd. + * + */ +#ifndef _UAPI_PISP_FE_STATISTICS_H_ +#define _UAPI_PISP_FE_STATISTICS_H_ + +#include <linux/types.h> + +#define PISP_FLOATING_STATS_NUM_ZONES 4 +#define PISP_AGC_STATS_NUM_BINS 1024 +#define PISP_AGC_STATS_SIZE 16 +#define PISP_AGC_STATS_NUM_ZONES (PISP_AGC_STATS_SIZE * PISP_AGC_STATS_SIZE) +#define PISP_AGC_STATS_NUM_ROW_SUMS 512 + +struct pisp_agc_statistics_zone { + __u64 Y_sum; + __u32 counted; + __u32 pad; +} __attribute__((packed)); + +struct pisp_agc_statistics { + __u32 row_sums[PISP_AGC_STATS_NUM_ROW_SUMS]; + /* + * 32-bits per bin means an image (just less than) 16384x16384 pixels + * in size can weight every pixel from 0 to 15. 
+ */ + __u32 histogram[PISP_AGC_STATS_NUM_BINS]; + struct pisp_agc_statistics_zone floating[PISP_FLOATING_STATS_NUM_ZONES]; +} __attribute__((packed)); + +#define PISP_AWB_STATS_SIZE 32 +#define PISP_AWB_STATS_NUM_ZONES (PISP_AWB_STATS_SIZE * PISP_AWB_STATS_SIZE) + +struct pisp_awb_statistics_zone { + __u32 R_sum; + __u32 G_sum; + __u32 B_sum; + __u32 counted; +} __attribute__((packed)); + +struct pisp_awb_statistics { + struct pisp_awb_statistics_zone zones[PISP_AWB_STATS_NUM_ZONES]; + struct pisp_awb_statistics_zone floating[PISP_FLOATING_STATS_NUM_ZONES]; +} __attribute__((packed)); + +#define PISP_CDAF_STATS_SIZE 8 +#define PISP_CDAF_STATS_NUM_FOMS (PISP_CDAF_STATS_SIZE * PISP_CDAF_STATS_SIZE) + +struct pisp_cdaf_statistics { + __u64 foms[PISP_CDAF_STATS_NUM_FOMS]; + __u64 floating[PISP_FLOATING_STATS_NUM_ZONES]; +} __attribute__((packed)); + +struct pisp_statistics { + struct pisp_awb_statistics awb; + struct pisp_agc_statistics agc; + struct pisp_cdaf_statistics cdaf; +} __attribute__((packed)); + +#endif /* _UAPI_PISP_FE_STATISTICS_H_ */ diff --git a/include/uapi/linux/mount.h b/include/uapi/linux/mount.h index 225bc366ffcb..c07008816aca 100644 --- a/include/uapi/linux/mount.h +++ b/include/uapi/linux/mount.h @@ -154,7 +154,7 @@ struct mount_attr { */ struct statmount { __u32 size; /* Total size, including strings */ - __u32 mnt_opts; /* [str] Mount options of the mount */ + __u32 mnt_opts; /* [str] Options (comma separated, escaped) */ __u64 mask; /* What results were written */ __u32 sb_dev_major; /* Device ID */ __u32 sb_dev_minor; @@ -173,7 +173,13 @@ struct statmount { __u32 mnt_root; /* [str] Root of mount relative to root of fs */ __u32 mnt_point; /* [str] Mountpoint relative to current root */ __u64 mnt_ns_id; /* ID of the mount namespace */ - __u64 __spare2[49]; + __u32 fs_subtype; /* [str] Subtype of fs_type (if any) */ + __u32 sb_source; /* [str] Source string of the mount */ + __u32 opt_num; /* Number of fs options */ + __u32 opt_array; /* [str] Array of nul terminated fs options */ + __u32 opt_sec_num; /* Number of security options */ + __u32 opt_sec_array; /* [str] Array of nul terminated security options */ + __u64 __spare2[46]; char str[]; /* Variable size part containing strings */ }; @@ -207,6 +213,10 @@ struct mnt_id_req { #define STATMOUNT_FS_TYPE 0x00000020U /* Want/got fs_type */ #define STATMOUNT_MNT_NS_ID 0x00000040U /* Want/got mnt_ns_id */ #define STATMOUNT_MNT_OPTS 0x00000080U /* Want/got mnt_opts */ +#define STATMOUNT_FS_SUBTYPE 0x00000100U /* Want/got fs_subtype */ +#define STATMOUNT_SB_SOURCE 0x00000200U /* Want/got sb_source */ +#define STATMOUNT_OPT_ARRAY 0x00000400U /* Want/got opt_... */ +#define STATMOUNT_OPT_SEC_ARRAY 0x00000800U /* Want/got opt_sec... */ /* * Special @mnt_id values that can be passed to listmount diff --git a/include/uapi/linux/net_shaper.h b/include/uapi/linux/net_shaper.h new file mode 100644 index 000000000000..d8834b59f7d7 --- /dev/null +++ b/include/uapi/linux/net_shaper.h @@ -0,0 +1,95 @@ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ +/* Do not edit directly, auto-generated from: */ +/* Documentation/netlink/specs/net_shaper.yaml */ +/* YNL-GEN uapi header */ + +#ifndef _UAPI_LINUX_NET_SHAPER_H +#define _UAPI_LINUX_NET_SHAPER_H + +#define NET_SHAPER_FAMILY_NAME "net-shaper" +#define NET_SHAPER_FAMILY_VERSION 1 + +/** + * enum net_shaper_scope - Defines the shaper @id interpretation. + * @NET_SHAPER_SCOPE_UNSPEC: The scope is not specified. 
+ * @NET_SHAPER_SCOPE_NETDEV: The main shaper for the given network device. + * @NET_SHAPER_SCOPE_QUEUE: The shaper is attached to the given device queue, + * the @id represents the queue number. + * @NET_SHAPER_SCOPE_NODE: The shaper allows grouping of queues or other node + * shapers; can be nested in either @netdev shapers or other @node shapers, + * allowing placement in any location of the scheduling tree, except leaves + * and root. + */ +enum net_shaper_scope { + NET_SHAPER_SCOPE_UNSPEC, + NET_SHAPER_SCOPE_NETDEV, + NET_SHAPER_SCOPE_QUEUE, + NET_SHAPER_SCOPE_NODE, + + /* private: */ + __NET_SHAPER_SCOPE_MAX, + NET_SHAPER_SCOPE_MAX = (__NET_SHAPER_SCOPE_MAX - 1) +}; + +/** + * enum net_shaper_metric - Different metric supported by the shaper. + * @NET_SHAPER_METRIC_BPS: Shaper operates on a bits per second basis. + * @NET_SHAPER_METRIC_PPS: Shaper operates on a packets per second basis. + */ +enum net_shaper_metric { + NET_SHAPER_METRIC_BPS, + NET_SHAPER_METRIC_PPS, +}; + +enum { + NET_SHAPER_A_HANDLE = 1, + NET_SHAPER_A_METRIC, + NET_SHAPER_A_BW_MIN, + NET_SHAPER_A_BW_MAX, + NET_SHAPER_A_BURST, + NET_SHAPER_A_PRIORITY, + NET_SHAPER_A_WEIGHT, + NET_SHAPER_A_IFINDEX, + NET_SHAPER_A_PARENT, + NET_SHAPER_A_LEAVES, + + __NET_SHAPER_A_MAX, + NET_SHAPER_A_MAX = (__NET_SHAPER_A_MAX - 1) +}; + +enum { + NET_SHAPER_A_HANDLE_SCOPE = 1, + NET_SHAPER_A_HANDLE_ID, + + __NET_SHAPER_A_HANDLE_MAX, + NET_SHAPER_A_HANDLE_MAX = (__NET_SHAPER_A_HANDLE_MAX - 1) +}; + +enum { + NET_SHAPER_A_CAPS_IFINDEX = 1, + NET_SHAPER_A_CAPS_SCOPE, + NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS, + NET_SHAPER_A_CAPS_SUPPORT_METRIC_PPS, + NET_SHAPER_A_CAPS_SUPPORT_NESTING, + NET_SHAPER_A_CAPS_SUPPORT_BW_MIN, + NET_SHAPER_A_CAPS_SUPPORT_BW_MAX, + NET_SHAPER_A_CAPS_SUPPORT_BURST, + NET_SHAPER_A_CAPS_SUPPORT_PRIORITY, + NET_SHAPER_A_CAPS_SUPPORT_WEIGHT, + + __NET_SHAPER_A_CAPS_MAX, + NET_SHAPER_A_CAPS_MAX = (__NET_SHAPER_A_CAPS_MAX - 1) +}; + +enum { + NET_SHAPER_CMD_GET = 1, + NET_SHAPER_CMD_SET, + NET_SHAPER_CMD_DELETE, + NET_SHAPER_CMD_GROUP, + NET_SHAPER_CMD_CAP_GET, + + __NET_SHAPER_CMD_MAX, + NET_SHAPER_CMD_MAX = (__NET_SHAPER_CMD_MAX - 1) +}; + +#endif /* _UAPI_LINUX_NET_SHAPER_H */ diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h index 7c308f04e7a0..e4be227d3ad6 100644 --- a/include/uapi/linux/netdev.h +++ b/include/uapi/linux/netdev.h @@ -122,6 +122,9 @@ enum { NETDEV_A_NAPI_ID, NETDEV_A_NAPI_IRQ, NETDEV_A_NAPI_PID, + NETDEV_A_NAPI_DEFER_HARD_IRQS, + NETDEV_A_NAPI_GRO_FLUSH_TIMEOUT, + NETDEV_A_NAPI_IRQ_SUSPEND_TIMEOUT, __NETDEV_A_NAPI_MAX, NETDEV_A_NAPI_MAX = (__NETDEV_A_NAPI_MAX - 1) @@ -199,6 +202,7 @@ enum { NETDEV_CMD_NAPI_GET, NETDEV_CMD_QSTATS_GET, NETDEV_CMD_BIND_RX, + NETDEV_CMD_NAPI_SET, __NETDEV_CMD_MAX, NETDEV_CMD_MAX = (__NETDEV_CMD_MAX - 1) diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index 9e9079321380..49c944e78463 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -564,16 +564,26 @@ enum nft_immediate_attributes { /** * enum nft_bitwise_ops - nf_tables bitwise operations * - * @NFT_BITWISE_BOOL: mask-and-xor operation used to implement NOT, AND, OR and - * XOR boolean operations + * @NFT_BITWISE_MASK_XOR: mask-and-xor operation used to implement NOT, AND, OR + * and XOR boolean operations * @NFT_BITWISE_LSHIFT: left-shift operation * @NFT_BITWISE_RSHIFT: right-shift operation + * @NFT_BITWISE_AND: and operation + * @NFT_BITWISE_OR: or operation + * @NFT_BITWISE_XOR: xor operation */ 
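
The renamed NFT_BITWISE_MASK_XOR primitive computes dreg = (sreg & mask) ^ xor, per the header documentation, and each boolean operation against a constant reduces to a mask/xor pair. A self-contained illustration in plain C (this is the arithmetic, not the netlink encoding):

#include <assert.h>
#include <stdint.h>

/* The MASK_XOR primitive: dreg = (sreg & mask) ^ xor */
static uint32_t mask_xor(uint32_t sreg, uint32_t mask, uint32_t xor_val)
{
	return (sreg & mask) ^ xor_val;
}

int main(void)
{
	uint32_t x = 0x1234abcd, c = 0x00ff00ff;

	assert(mask_xor(x, c, 0) == (x & c));	/* AND: mask=c,  xor=0  */
	assert(mask_xor(x, ~c, c) == (x | c));	/* OR:  mask=~c, xor=c  */
	assert(mask_xor(x, ~0u, ~0u) == ~x);	/* NOT: mask=~0, xor=~0 */
	return 0;
}
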
enum nft_bitwise_ops { - NFT_BITWISE_BOOL, + NFT_BITWISE_MASK_XOR, NFT_BITWISE_LSHIFT, NFT_BITWISE_RSHIFT, + NFT_BITWISE_AND, + NFT_BITWISE_OR, + NFT_BITWISE_XOR, }; +/* + * Old name for NFT_BITWISE_MASK_XOR. Retained for backwards-compatibility. + */ +#define NFT_BITWISE_BOOL NFT_BITWISE_MASK_XOR /** * enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes @@ -586,6 +596,7 @@ enum nft_bitwise_ops { * @NFTA_BITWISE_OP: type of operation (NLA_U32: nft_bitwise_ops) * @NFTA_BITWISE_DATA: argument for non-boolean operations * (NLA_NESTED: nft_data_attributes) + * @NFTA_BITWISE_SREG2: second source register (NLA_U32: nft_registers) * * The bitwise expression supports boolean and shift operations. It implements * the boolean operations by performing the following operation: @@ -609,6 +620,7 @@ enum nft_bitwise_attributes { NFTA_BITWISE_XOR, NFTA_BITWISE_OP, NFTA_BITWISE_DATA, + NFTA_BITWISE_SREG2, __NFTA_BITWISE_MAX }; #define NFTA_BITWISE_MAX (__NFTA_BITWISE_MAX - 1) diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h index 4fa4e979e948..2f5b4be25261 100644 --- a/include/uapi/linux/nfc.h +++ b/include/uapi/linux/nfc.h @@ -164,6 +164,7 @@ enum nfc_commands { * @NFC_ATTR_VENDOR_SUBCMD: Vendor specific sub command * @NFC_ATTR_VENDOR_DATA: Vendor specific data, to be optionally passed * to a vendor specific command implementation + * @NFC_ATTR_TARGET_ATS: ISO 14443 type A target Answer To Select */ enum nfc_attrs { NFC_ATTR_UNSPEC, @@ -198,6 +199,7 @@ enum nfc_attrs { NFC_ATTR_VENDOR_ID, NFC_ATTR_VENDOR_SUBCMD, NFC_ATTR_VENDOR_DATA, + NFC_ATTR_TARGET_ATS, /* private: internal use only */ __NFC_ATTR_AFTER_LAST }; @@ -225,6 +227,7 @@ enum nfc_sdp_attr { #define NFC_GB_MAXSIZE 48 #define NFC_FIRMWARE_NAME_MAXSIZE 32 #define NFC_ISO15693_UID_MAXSIZE 8 +#define NFC_ATS_MAXSIZE 20 /* NFC protocols */ #define NFC_PROTO_JEWEL 1 diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f97f5adc8d51..6d11437596b9 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2868,6 +2868,9 @@ enum nl80211_commands { * nested item, it contains attributes defined in * &enum nl80211_if_combination_attrs. * + * @NL80211_ATTR_VIF_RADIO_MASK: Bitmask of allowed radios (u32). + * A value of 0 means all radios. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3416,6 +3419,8 @@ enum nl80211_attrs { NL80211_ATTR_WIPHY_RADIOS, NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS, + NL80211_ATTR_VIF_RADIO_MASK, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -4698,6 +4703,7 @@ enum nl80211_survey_info { * overrides all other flags. * @NL80211_MNTR_FLAG_ACTIVE: use the configured MAC address * and ACK incoming unicast packets. + * @NL80211_MNTR_FLAG_SKIP_TX: do not pass local tx packets * * @__NL80211_MNTR_FLAG_AFTER_LAST: internal use * @NL80211_MNTR_FLAG_MAX: highest possible monitor flag @@ -4710,6 +4716,7 @@ enum nl80211_mntr_flags { NL80211_MNTR_FLAG_OTHER_BSS, NL80211_MNTR_FLAG_COOK_FRAMES, NL80211_MNTR_FLAG_ACTIVE, + NL80211_MNTR_FLAG_SKIP_TX, /* keep last */ __NL80211_MNTR_FLAG_AFTER_LAST, @@ -8031,6 +8038,8 @@ enum nl80211_ap_settings_flags { * @NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION: Supported interface * combination for this radio. Attribute may be present multiple times * and contains attributes defined in &enum nl80211_if_combination_attrs. 
+ * @NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK: bitmask (u32) of antennas + * connected to this radio. * * @__NL80211_WIPHY_RADIO_ATTR_LAST: Internal * @NL80211_WIPHY_RADIO_ATTR_MAX: Highest attribute @@ -8041,6 +8050,7 @@ enum nl80211_wiphy_radio_attrs { NL80211_WIPHY_RADIO_ATTR_INDEX, NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE, NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION, + NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK, /* keep last */ __NL80211_WIPHY_RADIO_ATTR_LAST, diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index 12323b3334a9..1601c7ed5fab 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -340,7 +340,8 @@ #define PCI_MSIX_ENTRY_UPPER_ADDR 0x4 /* Message Upper Address */ #define PCI_MSIX_ENTRY_DATA 0x8 /* Message Data */ #define PCI_MSIX_ENTRY_VECTOR_CTRL 0xc /* Vector Control */ -#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001 +#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001 /* Mask Bit */ +#define PCI_MSIX_ENTRY_CTRL_ST 0xffff0000 /* Steering Tag */ /* CompactPCI Hotswap Register */ @@ -659,6 +660,7 @@ #define PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */ #define PCI_EXP_DEVCAP2_ATOMIC_COMP128 0x00000200 /* 128b AtomicOp completion */ #define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */ +#define PCI_EXP_DEVCAP2_TPH_COMP_MASK 0x00003000 /* TPH completer support */ #define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */ #define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */ #define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */ @@ -678,6 +680,7 @@ #define PCI_EXP_DEVSTA2 0x2a /* Device Status 2 */ #define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c /* end of v2 EPs w/o link */ #define PCI_EXP_LNKCAP2 0x2c /* Link Capabilities 2 */ +#define PCI_EXP_LNKCAP2_SLS 0x000000fe /* Supported Link Speeds Vector */ #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */ #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */ #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */ @@ -1023,15 +1026,34 @@ #define PCI_DPA_CAP_SUBSTATE_MASK 0x1F /* # substates - 1 */ #define PCI_DPA_BASE_SIZEOF 16 /* size with 0 substates */ +/* TPH Completer Support */ +#define PCI_EXP_DEVCAP2_TPH_COMP_NONE 0x0 /* None */ +#define PCI_EXP_DEVCAP2_TPH_COMP_TPH_ONLY 0x1 /* TPH only */ +#define PCI_EXP_DEVCAP2_TPH_COMP_EXT_TPH 0x3 /* TPH and Extended TPH */ + /* TPH Requester */ #define PCI_TPH_CAP 4 /* capability register */ -#define PCI_TPH_CAP_LOC_MASK 0x600 /* location mask */ -#define PCI_TPH_LOC_NONE 0x000 /* no location */ -#define PCI_TPH_LOC_CAP 0x200 /* in capability */ -#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */ -#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST table mask */ -#define PCI_TPH_CAP_ST_SHIFT 16 /* ST table shift */ -#define PCI_TPH_BASE_SIZEOF 0xc /* size with no ST table */ +#define PCI_TPH_CAP_ST_NS 0x00000001 /* No ST Mode Supported */ +#define PCI_TPH_CAP_ST_IV 0x00000002 /* Interrupt Vector Mode Supported */ +#define PCI_TPH_CAP_ST_DS 0x00000004 /* Device Specific Mode Supported */ +#define PCI_TPH_CAP_EXT_TPH 0x00000100 /* Ext TPH Requester Supported */ +#define PCI_TPH_CAP_LOC_MASK 0x00000600 /* ST Table Location */ +#define PCI_TPH_LOC_NONE 0x00000000 /* Not present */ +#define PCI_TPH_LOC_CAP 0x00000200 /* In capability */ +#define PCI_TPH_LOC_MSIX 0x00000400 /* In MSI-X */ +#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST Table Size */ +#define PCI_TPH_CAP_ST_SHIFT 16 /* ST Table Size shift */ +#define 
PCI_TPH_BASE_SIZEOF 0xc /* Size with no ST table */ + +#define PCI_TPH_CTRL 8 /* control register */ +#define PCI_TPH_CTRL_MODE_SEL_MASK 0x00000007 /* ST Mode Select */ +#define PCI_TPH_ST_NS_MODE 0x0 /* No ST Mode */ +#define PCI_TPH_ST_IV_MODE 0x1 /* Interrupt Vector Mode */ +#define PCI_TPH_ST_DS_MODE 0x2 /* Device Specific Mode */ +#define PCI_TPH_CTRL_REQ_EN_MASK 0x00000300 /* TPH Requester Enable */ +#define PCI_TPH_REQ_DISABLE 0x0 /* No TPH requests allowed */ +#define PCI_TPH_REQ_TPH_ONLY 0x1 /* TPH only requests allowed */ +#define PCI_TPH_REQ_EXT_TPH 0x3 /* Extended TPH requests allowed */ /* Downstream Port Containment */ #define PCI_EXP_DPC_CAP 0x04 /* DPC Capability */ diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 4842c36fdf80..0524d541d4e3 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -511,7 +511,16 @@ struct perf_event_attr { __u16 sample_max_stack; __u16 __reserved_2; __u32 aux_sample_size; - __u32 __reserved_3; + + union { + __u32 aux_action; + struct { + __u32 aux_start_paused : 1, /* start AUX area tracing paused */ + aux_pause : 1, /* on overflow, pause AUX area tracing */ + aux_resume : 1, /* on overflow, resume AUX area tracing */ + __reserved_3 : 29; + }; + }; /* * User provided data if sigtrap=1, passed back to user via diff --git a/include/uapi/linux/pidfd.h b/include/uapi/linux/pidfd.h index 565fc0629fff..4540f6301b8c 100644 --- a/include/uapi/linux/pidfd.h +++ b/include/uapi/linux/pidfd.h @@ -16,6 +16,55 @@ #define PIDFD_SIGNAL_THREAD_GROUP (1UL << 1) #define PIDFD_SIGNAL_PROCESS_GROUP (1UL << 2) +/* Flags for pidfd_info. */ +#define PIDFD_INFO_PID (1UL << 0) /* Always returned, even if not requested */ +#define PIDFD_INFO_CREDS (1UL << 1) /* Always returned, even if not requested */ +#define PIDFD_INFO_CGROUPID (1UL << 2) /* Always returned if available, even if not requested */ + +#define PIDFD_INFO_SIZE_VER0 64 /* sizeof first published struct */ + +struct pidfd_info { + /* + * This mask is similar to the request_mask in statx(2). + * + * Userspace indicates what extensions or expensive-to-calculate fields + * they want by setting the corresponding bits in mask. The kernel + * will ignore bits that it does not know about. + * + * When filling the structure, the kernel will only set bits + * corresponding to the fields that were actually filled by the kernel. + * This also includes any future extensions that might be automatically + * filled. If the structure size is too small to contain a field + * (requested or not), to avoid confusion the mask will not + * contain a bit for that field. + * + * As such, userspace MUST verify that mask contains the + * corresponding flags after the ioctl(2) returns to ensure that it is + * using valid data. + */ + __u64 mask; + /* + * The information contained in the following fields might be stale at the + * time it is received, as the target process might have exited as soon as + * the IOCTL was processed, and there is no way to avoid that. However, it + * is guaranteed that if the call was successful, then the information was + * correct and referred to the intended process at the time the work was + * performed. 
*/ + __u64 cgroupid; + __u32 pid; + __u32 tgid; + __u32 ppid; + __u32 ruid; + __u32 rgid; + __u32 euid; + __u32 egid; + __u32 suid; + __u32 sgid; + __u32 fsuid; + __u32 fsgid; + __u32 spare0[1]; +}; + #define PIDFS_IOCTL_MAGIC 0xFF #define PIDFD_GET_CGROUP_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 1) @@ -28,5 +77,6 @@ #define PIDFD_GET_TIME_FOR_CHILDREN_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 8) #define PIDFD_GET_USER_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 9) #define PIDFD_GET_UTS_NAMESPACE _IO(PIDFS_IOCTL_MAGIC, 10) +#define PIDFD_GET_INFO _IOWR(PIDFS_IOCTL_MAGIC, 11, struct pidfd_info) #endif /* _UAPI_LINUX_PIDFD_H */ diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index a3cd0c2dc995..25a9a47001cd 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h @@ -836,6 +836,8 @@ enum { TCA_FQ_WEIGHTS, /* Weights for each band */ + TCA_FQ_OFFLOAD_HORIZON, /* dequeue paced packets within this horizon immediately (us units) */ + __TCA_FQ_MAX }; diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 35791791a879..5c6080680cb2 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -230,7 +230,7 @@ struct prctl_mm_map { # define PR_PAC_APDBKEY (1UL << 3) # define PR_PAC_APGAKEY (1UL << 4) -/* Tagged user address controls for arm64 */ +/* Tagged user address controls for arm64 and RISC-V */ #define PR_SET_TAGGED_ADDR_CTRL 55 #define PR_GET_TAGGED_ADDR_CTRL 56 # define PR_TAGGED_ADDR_ENABLE (1UL << 0) @@ -244,6 +244,9 @@ struct prctl_mm_map { # define PR_MTE_TAG_MASK (0xffffUL << PR_MTE_TAG_SHIFT) /* Unused; kept only for source compatibility */ # define PR_MTE_TCF_SHIFT 1 +/* RISC-V pointer masking tag length */ +# define PR_PMLEN_SHIFT 24 +# define PR_PMLEN_MASK (0x7fUL << PR_PMLEN_SHIFT) /* Control reclaim behavior when allocating memory */ #define PR_SET_IO_FLUSHER 57 @@ -328,4 +331,26 @@ struct prctl_mm_map { # define PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC 0x10 /* Clear the aspect on exec */ # define PR_PPC_DEXCR_CTRL_MASK 0x1f +/* + * Get the current shadow stack configuration for the current thread, + * this will be the value configured via PR_SET_SHADOW_STACK_STATUS. + */ +#define PR_GET_SHADOW_STACK_STATUS 74 + +/* + * Set the current shadow stack configuration. Enabling the shadow + * stack will cause a shadow stack to be allocated for the thread. + */ +#define PR_SET_SHADOW_STACK_STATUS 75 +# define PR_SHADOW_STACK_ENABLE (1UL << 0) +# define PR_SHADOW_STACK_WRITE (1UL << 1) +# define PR_SHADOW_STACK_PUSH (1UL << 2) + +/* + * Prevent further changes to the specified shadow stack + * configuration. All bits may be locked via this call, including + * undefined bits. 
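
A sketch of how the three shadow stack prctls compose, assuming an architecture with kernel shadow-stack support (e.g. arm64 GCS or RISC-V Zicfiss): enable the shadow stack for the thread, then lock every status bit so the configuration cannot be changed again. The fallback defines mirror the values added above for the case of older libc headers.

#include <sys/prctl.h>

#ifndef PR_SET_SHADOW_STACK_STATUS
#define PR_GET_SHADOW_STACK_STATUS 74
#define PR_SET_SHADOW_STACK_STATUS 75
#define PR_SHADOW_STACK_ENABLE (1UL << 0)
#define PR_LOCK_SHADOW_STACK_STATUS 76
#endif

static int shadow_stack_enable_and_lock(void)
{
	if (prctl(PR_SET_SHADOW_STACK_STATUS, PR_SHADOW_STACK_ENABLE, 0, 0, 0))
		return -1;	/* fails if the kernel/arch lacks support */
	/* Lock all bits, including still-undefined ones, as allowed above. */
	return prctl(PR_LOCK_SHADOW_STACK_STATUS, ~0UL, 0, 0, 0);
}
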
+ */ +#define PR_LOCK_SHADOW_STACK_STATUS 76 + #endif /* _LINUX_PRCTL_H */ diff --git a/include/uapi/linux/psci.h b/include/uapi/linux/psci.h index 42a40ad3fb62..81759ff385e6 100644 --- a/include/uapi/linux/psci.h +++ b/include/uapi/linux/psci.h @@ -59,6 +59,7 @@ #define PSCI_1_1_FN_SYSTEM_RESET2 PSCI_0_2_FN(18) #define PSCI_1_1_FN_MEM_PROTECT PSCI_0_2_FN(19) #define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN(20) +#define PSCI_1_3_FN_SYSTEM_OFF2 PSCI_0_2_FN(21) #define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND PSCI_0_2_FN64(12) #define PSCI_1_0_FN64_NODE_HW_STATE PSCI_0_2_FN64(13) @@ -68,6 +69,7 @@ #define PSCI_1_1_FN64_SYSTEM_RESET2 PSCI_0_2_FN64(18) #define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE PSCI_0_2_FN64(20) +#define PSCI_1_3_FN64_SYSTEM_OFF2 PSCI_0_2_FN64(21) /* PSCI v0.2 power state encoding for CPU_SUSPEND function */ #define PSCI_0_2_POWER_STATE_ID_MASK 0xffff @@ -100,6 +102,9 @@ #define PSCI_1_1_RESET_TYPE_SYSTEM_WARM_RESET 0 #define PSCI_1_1_RESET_TYPE_VENDOR_START 0x80000000U +/* PSCI v1.3 hibernate type for SYSTEM_OFF2 */ +#define PSCI_1_3_OFF_TYPE_HIBERNATE_OFF BIT(0) + /* PSCI version decoding (independent of PSCI version) */ #define PSCI_VERSION_MAJOR_SHIFT 16 #define PSCI_VERSION_MINOR_MASK \ diff --git a/include/uapi/linux/reiserfs_fs.h b/include/uapi/linux/reiserfs_fs.h deleted file mode 100644 index 5bb921409f2b..000000000000 --- a/include/uapi/linux/reiserfs_fs.h +++ /dev/null @@ -1,27 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright 1996, 1997, 1998 Hans Reiser, see reiserfs/README for licensing and copyright details - */ -#ifndef _LINUX_REISER_FS_H -#define _LINUX_REISER_FS_H - -#include <linux/types.h> -#include <linux/magic.h> - -/* - * include/linux/reiser_fs.h - * - * Reiser File System constants and structures - * - */ - -/* ioctl's command */ -#define REISERFS_IOC_UNPACK _IOW(0xCD,1,long) -/* define following flags to be the same as in ext2, so that chattr(1), - lsattr(1) will work with us. 
*/
-#define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS
-#define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS
-#define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION
-#define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION
-
-#endif /* _LINUX_REISER_FS_H */
diff --git a/include/uapi/linux/reiserfs_xattr.h b/include/uapi/linux/reiserfs_xattr.h
deleted file mode 100644
index 503ad018ce5b..000000000000
--- a/include/uapi/linux/reiserfs_xattr.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/*
- File: linux/reiserfs_xattr.h
-*/
-
-#ifndef _LINUX_REISERFS_XATTR_H
-#define _LINUX_REISERFS_XATTR_H
-
-#include <linux/types.h>
-
-/* Magic value in header */
-#define REISERFS_XATTR_MAGIC 0x52465841 /* "RFXA" */
-
-struct reiserfs_xattr_header {
- __le32 h_magic; /* magic number for identification */
- __le32 h_hash; /* hash of the value */
-};
-
-struct reiserfs_security_handle {
- const char *name;
- void *value;
- __kernel_size_t length;
-};
-
-#endif /* _LINUX_REISERFS_XATTR_H */
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 3b687d20c9ed..db7254d52d93 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -174,7 +174,7 @@ enum {
#define RTM_GETLINKPROP RTM_GETLINKPROP
 RTM_NEWVLAN = 112,
-#define RTM_NEWNVLAN RTM_NEWVLAN
+#define RTM_NEWVLAN RTM_NEWVLAN
 RTM_DELVLAN,
#define RTM_DELVLAN RTM_DELVLAN
 RTM_GETVLAN,
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index d3994b7716bc..9025dd5a4f0f 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -215,5 +215,6 @@ struct opal_revert_lsp {
#define IOC_OPAL_GET_GEOMETRY _IOR('p', 238, struct opal_geometry)
#define IOC_OPAL_DISCOVERY _IOW('p', 239, struct opal_discovery)
#define IOC_OPAL_REVERT_LSP _IOW('p', 240, struct opal_revert_lsp)
+#define IOC_OPAL_SET_SID_PW _IOW('p', 241, struct opal_new_pw)
#endif /* _UAPI_SED_OPAL_H */
diff --git a/include/uapi/linux/thermal.h b/include/uapi/linux/thermal.h
index fc78bf3aead7..ba8604bdf206 100644
--- a/include/uapi/linux/thermal.h
+++ b/include/uapi/linux/thermal.h
@@ -3,6 +3,8 @@
#define _UAPI_LINUX_THERMAL_H
#define THERMAL_NAME_LENGTH 20
+#define THERMAL_THRESHOLD_WAY_UP BIT(0)
+#define THERMAL_THRESHOLD_WAY_DOWN BIT(1)
enum thermal_device_mode {
 THERMAL_DEVICE_DISABLED = 0,
@@ -18,7 +20,7 @@ enum thermal_trip_type {
/* Adding event notification support elements */
#define THERMAL_GENL_FAMILY_NAME "thermal"
-#define THERMAL_GENL_VERSION 0x01
+#define THERMAL_GENL_VERSION 0x02
#define THERMAL_GENL_SAMPLING_GROUP_NAME "sampling"
#define THERMAL_GENL_EVENT_GROUP_NAME "event"
@@ -28,6 +30,7 @@ enum thermal_genl_attr {
 THERMAL_GENL_ATTR_TZ,
 THERMAL_GENL_ATTR_TZ_ID,
 THERMAL_GENL_ATTR_TZ_TEMP,
+ THERMAL_GENL_ATTR_TZ_PREV_TEMP,
 THERMAL_GENL_ATTR_TZ_TRIP,
 THERMAL_GENL_ATTR_TZ_TRIP_ID,
 THERMAL_GENL_ATTR_TZ_TRIP_TYPE,
@@ -48,6 +51,9 @@ enum thermal_genl_attr {
 THERMAL_GENL_ATTR_CPU_CAPABILITY_ID,
 THERMAL_GENL_ATTR_CPU_CAPABILITY_PERFORMANCE,
 THERMAL_GENL_ATTR_CPU_CAPABILITY_EFFICIENCY,
+ THERMAL_GENL_ATTR_THRESHOLD,
+ THERMAL_GENL_ATTR_THRESHOLD_TEMP,
+ THERMAL_GENL_ATTR_THRESHOLD_DIRECTION,
 __THERMAL_GENL_ATTR_MAX,
};
#define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1)
@@ -75,6 +81,11 @@ enum thermal_genl_event {
 THERMAL_GENL_EVENT_CDEV_STATE_UPDATE, /* Cdev state updated */
 THERMAL_GENL_EVENT_TZ_GOV_CHANGE, /* Governor policy changed */
 THERMAL_GENL_EVENT_CPU_CAPABILITY_CHANGE, /* CPU capability changed */
+ THERMAL_GENL_EVENT_THRESHOLD_ADD, /* A threshold has been
added */
+ THERMAL_GENL_EVENT_THRESHOLD_DELETE, /* A threshold has been deleted */
+ THERMAL_GENL_EVENT_THRESHOLD_FLUSH, /* All thresholds have been deleted */
+ THERMAL_GENL_EVENT_THRESHOLD_UP, /* A threshold has been crossed on the way up */
+ THERMAL_GENL_EVENT_THRESHOLD_DOWN, /* A threshold has been crossed on the way down */
 __THERMAL_GENL_EVENT_MAX,
};
#define THERMAL_GENL_EVENT_MAX (__THERMAL_GENL_EVENT_MAX - 1)
@@ -82,12 +93,16 @@ enum thermal_genl_event {
/* Commands supported by the thermal_genl_family */
enum thermal_genl_cmd {
 THERMAL_GENL_CMD_UNSPEC,
- THERMAL_GENL_CMD_TZ_GET_ID, /* List of thermal zones id */
- THERMAL_GENL_CMD_TZ_GET_TRIP, /* List of thermal trips */
- THERMAL_GENL_CMD_TZ_GET_TEMP, /* Get the thermal zone temperature */
- THERMAL_GENL_CMD_TZ_GET_GOV, /* Get the thermal zone governor */
- THERMAL_GENL_CMD_TZ_GET_MODE, /* Get the thermal zone mode */
- THERMAL_GENL_CMD_CDEV_GET, /* List of cdev id */
+ THERMAL_GENL_CMD_TZ_GET_ID, /* List of thermal zones id */
+ THERMAL_GENL_CMD_TZ_GET_TRIP, /* List of thermal trips */
+ THERMAL_GENL_CMD_TZ_GET_TEMP, /* Get the thermal zone temperature */
+ THERMAL_GENL_CMD_TZ_GET_GOV, /* Get the thermal zone governor */
+ THERMAL_GENL_CMD_TZ_GET_MODE, /* Get the thermal zone mode */
+ THERMAL_GENL_CMD_CDEV_GET, /* List of cdev id */
+ THERMAL_GENL_CMD_THRESHOLD_GET, /* List of thresholds */
+ THERMAL_GENL_CMD_THRESHOLD_ADD, /* Add a threshold */
+ THERMAL_GENL_CMD_THRESHOLD_DELETE, /* Delete a threshold */
+ THERMAL_GENL_CMD_THRESHOLD_FLUSH, /* Flush all the thresholds */
 __THERMAL_GENL_CMD_MAX,
};
#define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1)
diff --git a/include/uapi/linux/types.h b/include/uapi/linux/types.h
index 6375a0684052..48b933938877 100644
--- a/include/uapi/linux/types.h
+++ b/include/uapi/linux/types.h
@@ -53,6 +53,7 @@ typedef __u32 __bitwise __wsum;
 * No conversions are necessary between 32-bit user-space and a 64-bit kernel.
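
Why the aligned(8) wrappers matter, illustrated: on i386 a bare 64-bit member is only 4-byte aligned inside a struct, so the same UAPI struct would lay out differently for 32-bit userspace and a 64-bit kernel; forcing 8-byte alignment pins the layout. The struct names here are hypothetical.

#include <stddef.h>
#include <stdint.h>

struct plain { uint32_t a; int64_t b; };
struct fixed { uint32_t a; int64_t b __attribute__((aligned(8))); };

/*
 * On i386: offsetof(struct plain, b) == 4, offsetof(struct fixed, b) == 8.
 * On x86-64 both offsets are 8, so only the "fixed" layout matches across
 * 32-bit userspace and a 64-bit kernel -- which is what __aligned_s64 and
 * friends guarantee.
 */
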
*/
#define __aligned_u64 __u64 __attribute__((aligned(8)))
+#define __aligned_s64 __s64 __attribute__((aligned(8)))
#define __aligned_be64 __be64 __attribute__((aligned(8)))
#define __aligned_le64 __le64 __attribute__((aligned(8)))
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 12873639ea96..a8bc98bb69fc 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -147,8 +147,18 @@
 */
#define UBLK_F_NEED_GET_DATA (1UL << 2)
+/*
+ * - Block devices are recoverable if ublk server exits and restarts
+ * - Outstanding I/O when ublk server exits is met with errors
+ * - I/O issued while there is no ublk server is queued
+ */
#define UBLK_F_USER_RECOVERY (1UL << 3)
+/*
+ * - Block devices are recoverable if ublk server exits and restarts
+ * - Outstanding I/O when ublk server exits is reissued
+ * - I/O issued while there is no ublk server is queued
+ */
#define UBLK_F_USER_RECOVERY_REISSUE (1UL << 4)
/*
@@ -190,10 +200,18 @@
 */
#define UBLK_F_ZONED (1ULL << 8)
+/*
+ * - Block devices are recoverable if ublk server exits and restarts
+ * - Outstanding I/O when ublk server exits is met with errors
+ * - I/O issued while there is no ublk server is met with errors
+ */
+#define UBLK_F_USER_RECOVERY_FAIL_IO (1ULL << 9)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
#define UBLK_S_DEV_QUIESCED 2
+#define UBLK_S_DEV_FAIL_IO 3
/* shipped via sqe->cmd of io_uring command */
struct ublksrv_ctrl_cmd {
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 1a0fe8b151fb..d85d671deed3 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -31,7 +31,7 @@ struct udphdr {
#define UDP_CORK 1 /* Never send partially complete segments */
#define UDP_ENCAP 100 /* Set the socket to accept encapsulated packets */
#define UDP_NO_CHECK6_TX 101 /* Disable sending checksum for UDP6X */
-#define UDP_NO_CHECK6_RX 102 /* Disable accpeting checksum for UDP6 */
+#define UDP_NO_CHECK6_RX 102 /* Disable accepting checksum for UDP6 */
#define UDP_SEGMENT 103 /* Set GSO segmentation size */
#define UDP_GRO 104 /* This socket can receive UDP GRO packets */
diff --git a/include/uapi/linux/usb/video.h b/include/uapi/linux/usb/video.h
index 2ff0e8a3a683..526b5155e23c 100644
--- a/include/uapi/linux/usb/video.h
+++ b/include/uapi/linux/usb/video.h
@@ -597,5 +597,63 @@ struct UVC_FRAME_MJPEG(n) { \
 __le32 dwFrameInterval[n]; \
} __attribute__ ((packed))
+/* Frame Based Payload - 3.1.1. Frame Based Video Format Descriptor */
+struct uvc_format_framebased {
+ __u8 bLength;
+ __u8 bDescriptorType;
+ __u8 bDescriptorSubType;
+ __u8 bFormatIndex;
+ __u8 bNumFrameDescriptors;
+ __u8 guidFormat[16];
+ __u8 bBitsPerPixel;
+ __u8 bDefaultFrameIndex;
+ __u8 bAspectRatioX;
+ __u8 bAspectRatioY;
+ __u8 bmInterfaceFlags;
+ __u8 bCopyProtect;
+ __u8 bVariableSize;
+} __attribute__((__packed__));
+
+#define UVC_DT_FORMAT_FRAMEBASED_SIZE 28
+
+/* Frame Based Payload - 3.1.2.
Frame Based Video Frame Descriptor */ +struct uvc_frame_framebased { + __u8 bLength; + __u8 bDescriptorType; + __u8 bDescriptorSubType; + __u8 bFrameIndex; + __u8 bmCapabilities; + __u16 wWidth; + __u16 wHeight; + __u32 dwMinBitRate; + __u32 dwMaxBitRate; + __u32 dwDefaultFrameInterval; + __u8 bFrameIntervalType; + __u32 dwBytesPerLine; + __u32 dwFrameInterval[]; +} __attribute__((__packed__)); + +#define UVC_DT_FRAME_FRAMEBASED_SIZE(n) (26+4*(n)) + +#define UVC_FRAME_FRAMEBASED(n) \ + uvc_frame_framebased_##n + +#define DECLARE_UVC_FRAME_FRAMEBASED(n) \ +struct UVC_FRAME_FRAMEBASED(n) { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubType; \ + __u8 bFrameIndex; \ + __u8 bmCapabilities; \ + __u16 wWidth; \ + __u16 wHeight; \ + __u32 dwMinBitRate; \ + __u32 dwMaxBitRate; \ + __u32 dwDefaultFrameInterval; \ + __u8 bFrameIntervalType; \ + __u32 dwBytesPerLine; \ + __u32 dwFrameInterval[n]; \ +} __attribute__ ((packed)) + #endif /* __LINUX_USB_VIDEO_H */ diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h index ef0128c7369c..44a16e0e5a12 100644 --- a/include/uapi/linux/v4l2-dv-timings.h +++ b/include/uapi/linux/v4l2-dv-timings.h @@ -2,7 +2,7 @@ /* * V4L2 DV timings header. * - * Copyright (C) 2012-2016 Hans Verkuil <hans.verkuil@cisco.com> + * Copyright (C) 2012-2016 Hans Verkuil <hansverk@cisco.com> */ #ifndef _V4L2_DV_TIMINGS_H diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 2b68e6cdf190..c8dbf8219c4f 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -35,7 +35,7 @@ #define VFIO_EEH 5 /* Two-stage IOMMU */ -#define VFIO_TYPE1_NESTING_IOMMU 6 /* Implies v2 */ +#define __VFIO_RESERVED_TYPE1_NESTING_IOMMU 6 /* Implies v2 */ #define VFIO_SPAPR_TCE_v2_IOMMU 7 diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 27239cb64065..e7c4dce39007 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -798,6 +798,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_S5C_UYVY_JPG v4l2_fourcc('S', '5', 'C', 'I') /* S5C73M3 interleaved UYVY/JPEG */ #define V4L2_PIX_FMT_Y8I v4l2_fourcc('Y', '8', 'I', ' ') /* Greyscale 8-bit L/R interleaved */ #define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */ +#define V4L2_PIX_FMT_Y16I v4l2_fourcc('Y', '1', '6', 'I') /* Greyscale 16-bit L/R interleaved */ #define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */ #define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */ #define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */ @@ -859,6 +860,8 @@ struct v4l2_pix_format { /* Vendor specific - used for RaspberryPi PiSP */ #define V4L2_META_FMT_RPI_BE_CFG v4l2_fourcc('R', 'P', 'B', 'C') /* PiSP BE configuration */ +#define V4L2_META_FMT_RPI_FE_CFG v4l2_fourcc('R', 'P', 'F', 'C') /* PiSP FE configuration */ +#define V4L2_META_FMT_RPI_FE_STATS v4l2_fourcc('R', 'P', 'F', 'S') /* PiSP FE stats */ #ifdef __KERNEL__ /* @@ -906,6 +909,9 @@ struct v4l2_fmtdesc { #define V4L2_FMT_FLAG_CSC_QUANTIZATION 0x0100 #define V4L2_FMT_FLAG_META_LINE_BASED 0x0200 +/* Format description flag, to be ORed with the index */ +#define V4L2_FMTDESC_FLAG_ENUM_ALL 0x80000000 + /* Frame Size and frame rate enumeration */ /* * F R A M E S I Z E E N U M E R A T I O N diff --git a/include/uapi/linux/virtio_crypto.h b/include/uapi/linux/virtio_crypto.h index 71a54a6849ca..2fccb64c9d6b 100644 
--- a/include/uapi/linux/virtio_crypto.h +++ b/include/uapi/linux/virtio_crypto.h @@ -329,6 +329,7 @@ struct virtio_crypto_op_header { VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x00) #define VIRTIO_CRYPTO_AKCIPHER_DECRYPT \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x01) + /* akcipher sign/verify opcodes are deprecated */ #define VIRTIO_CRYPTO_AKCIPHER_SIGN \ VIRTIO_CRYPTO_OPCODE(VIRTIO_CRYPTO_SERVICE_AKCIPHER, 0x02) #define VIRTIO_CRYPTO_AKCIPHER_VERIFY \ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index a8208492e822..1beb317df1b9 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -40,6 +40,7 @@ #define _LINUX_VIRTIO_PCI_H #include <linux/types.h> +#include <linux/kernel.h> #ifndef VIRTIO_PCI_NO_LEGACY @@ -240,6 +241,17 @@ struct virtio_pci_cfg_cap { #define VIRTIO_ADMIN_CMD_LEGACY_DEV_CFG_READ 0x5 #define VIRTIO_ADMIN_CMD_LEGACY_NOTIFY_INFO 0x6 +/* Device parts access commands. */ +#define VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY 0x7 +#define VIRTIO_ADMIN_CMD_DEVICE_CAP_GET 0x8 +#define VIRTIO_ADMIN_CMD_DRIVER_CAP_SET 0x9 +#define VIRTIO_ADMIN_CMD_RESOURCE_OBJ_CREATE 0xa +#define VIRTIO_ADMIN_CMD_RESOURCE_OBJ_DESTROY 0xd +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_GET 0xe +#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET 0xf +#define VIRTIO_ADMIN_CMD_DEV_PARTS_SET 0x10 +#define VIRTIO_ADMIN_CMD_DEV_MODE_SET 0x11 + struct virtio_admin_cmd_hdr { __le16 opcode; /* @@ -286,4 +298,123 @@ struct virtio_admin_cmd_notify_info_result { struct virtio_admin_cmd_notify_info_data entries[VIRTIO_ADMIN_CMD_MAX_NOTIFY_INFO]; }; +#define VIRTIO_DEV_PARTS_CAP 0x0000 + +struct virtio_dev_parts_cap { + __u8 get_parts_resource_objects_limit; + __u8 set_parts_resource_objects_limit; +}; + +#define MAX_CAP_ID __KERNEL_DIV_ROUND_UP(VIRTIO_DEV_PARTS_CAP + 1, 64) + +struct virtio_admin_cmd_query_cap_id_result { + __le64 supported_caps[MAX_CAP_ID]; +}; + +struct virtio_admin_cmd_cap_get_data { + __le16 id; + __u8 reserved[6]; +}; + +struct virtio_admin_cmd_cap_set_data { + __le16 id; + __u8 reserved[6]; + __u8 cap_specific_data[]; +}; + +struct virtio_admin_cmd_resource_obj_cmd_hdr { + __le16 type; + __u8 reserved[2]; + __le32 id; /* Indicates unique resource object id per resource object type */ +}; + +struct virtio_admin_cmd_resource_obj_create_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __le64 flags; + __u8 resource_obj_specific_data[]; +}; + +#define VIRTIO_RESOURCE_OBJ_DEV_PARTS 0 + +#define VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_GET 0 +#define VIRTIO_RESOURCE_OBJ_DEV_PARTS_TYPE_SET 1 + +struct virtio_resource_obj_dev_parts { + __u8 type; + __u8 reserved[7]; +}; + +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_SIZE 0 +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_COUNT 1 +#define VIRTIO_ADMIN_CMD_DEV_PARTS_METADATA_TYPE_LIST 2 + +struct virtio_admin_cmd_dev_parts_metadata_data { + struct virtio_admin_cmd_resource_obj_cmd_hdr hdr; + __u8 type; + __u8 reserved[7]; +}; + +#define VIRTIO_DEV_PART_F_OPTIONAL 0 + +struct virtio_dev_part_hdr { + __le16 part_type; + __u8 flags; + __u8 reserved; + union { + struct { + __le32 offset; + __le32 reserved; + } pci_common_cfg; + struct { + __le16 index; + __u8 reserved[6]; + } vq_index; + } selector; + __le32 length; +}; + +struct virtio_dev_part { + struct virtio_dev_part_hdr hdr; + __u8 value[]; +}; + +struct virtio_admin_cmd_dev_parts_metadata_result { + union { + struct { + __le32 size; + __le32 reserved; + } parts_size; + struct { + __le32 count; + __le32 reserved; + } 
+struct virtio_admin_cmd_dev_parts_metadata_result {
+	union {
+		struct {
+			__le32 size;
+			__le32 reserved;
+		} parts_size;
+		struct {
+			__le32 count;
+			__le32 reserved;
+		} hdr_list_count;
+		struct {
+			__le32 count;
+			__le32 reserved;
+			struct virtio_dev_part_hdr hdrs[];
+		} hdr_list;
+	};
+};
+
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_SELECTED	0
+#define VIRTIO_ADMIN_CMD_DEV_PARTS_GET_TYPE_ALL		1
+
+struct virtio_admin_cmd_dev_parts_get_data {
+	struct virtio_admin_cmd_resource_obj_cmd_hdr hdr;
+	__u8 type;
+	__u8 reserved[7];
+	struct virtio_dev_part_hdr hdr_list[];
+};
+
+struct virtio_admin_cmd_dev_parts_set_data {
+	struct virtio_admin_cmd_resource_obj_cmd_hdr hdr;
+	struct virtio_dev_part parts[];
+};
+
+#define VIRTIO_ADMIN_CMD_DEV_MODE_F_STOPPED	0
+
+struct virtio_admin_cmd_dev_mode_set_data {
+	__u8 flags;
+};
+
 #endif
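VIRTIO_ADMIN_CMD_CAP_ID_LIST_QUERY returns a bitmap of supported capability
ids in struct virtio_admin_cmd_query_cap_id_result. A hedged sketch of the
bit test a driver might perform on that result before using the device-parts
commands; the helper name is hypothetical, not part of the patch.

#include <linux/virtio_pci.h>

/* Hypothetical helper: true if capability id 'cap' is advertised in the
 * supported_caps bitmap returned by the cap-id list query. */
static bool virtio_admin_cap_supported(
		const struct virtio_admin_cmd_query_cap_id_result *res,
		unsigned int cap)
{
	if (cap >= MAX_CAP_ID * 64)
		return false;
	return le64_to_cpu(res->supported_caps[cap / 64]) & (1ULL << (cap % 64));
}

/* e.g.: if (virtio_admin_cap_supported(res, VIRTIO_DEV_PARTS_CAP)) ... */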
diff --git a/include/uapi/linux/vmclock-abi.h b/include/uapi/linux/vmclock-abi.h
new file mode 100644
index 000000000000..2d99b29ac44a
--- /dev/null
+++ b/include/uapi/linux/vmclock-abi.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
+
+/*
+ * This structure provides a vDSO-style clock to VM guests, exposing the
+ * relationship (or lack thereof) between the CPU clock (TSC, timebase, arch
+ * counter, etc.) and real time. It is designed to address the problem of
+ * live migration, which other clock enlightenments do not.
+ *
+ * When a guest is live migrated, this affects the clock in two ways.
+ *
+ * First, even between identical hosts the actual frequency of the underlying
+ * counter will change within the tolerances of its specification (typically
+ * ±50PPM, or 4 seconds a day). This frequency also varies over time on the
+ * same host, but can be tracked by NTP as it generally varies slowly. With
+ * live migration there is a step change in the frequency, with no warning.
+ *
+ * Second, there may be a step change in the value of the counter itself, as
+ * its accuracy is limited by the precision of the NTP synchronization on the
+ * source and destination hosts.
+ *
+ * So any calibration (NTP, PTP, etc.) which the guest has done on the source
+ * host before migration is invalid, and needs to be redone on the new host.
+ *
+ * In its most basic mode, this structure provides only an indication to the
+ * guest that live migration has occurred. This allows the guest to know that
+ * its clock is invalid and take remedial action. For applications that need
+ * reliable accurate timestamps (e.g. distributed databases), the structure
+ * can be mapped all the way to userspace. This allows the application to see
+ * directly for itself that the clock is disrupted and take appropriate
+ * action, even when using a vDSO-style method to get the time instead of a
+ * system call.
+ *
+ * In its more advanced mode, this structure can also be used to expose the
+ * precise relationship of the CPU counter to real time, as calibrated by the
+ * host. This means that userspace applications can have accurate time
+ * immediately after live migration, rather than having to pause operations
+ * and wait for NTP to recover. This mode does, of course, rely on the
+ * counter being reliable and consistent across CPUs.
+ *
+ * Note that this must be true UTC, never with smeared leap seconds. If a
+ * guest wishes to construct a smeared clock, it can do so. Presenting a
+ * smeared clock through this interface would be problematic because it
+ * actually messes with the apparent counter *period*. A linear smearing
+ * of 1 ms per second would effectively tweak the counter period by 1000PPM
+ * at the start/end of the smearing period, while a sinusoidal smear would
+ * basically be impossible to represent.
+ *
+ * This structure is offered with the intent that it be adopted into the
+ * nascent virtio-rtc standard, as a virtio-rtc that does not address the live
+ * migration problem seems a little less than fit for purpose. For that
+ * reason, certain fields use precisely the same numeric definitions as in
+ * the virtio-rtc proposal. The structure can also be exposed through an ACPI
+ * device with the CID "VMCLOCK", modelled on the "VMGENID" device except for
+ * the fact that it uses a real _CRS to convey the address of the structure
+ * (which should be a full page, to allow for mapping directly to userspace).
+ */
+
+#ifndef __VMCLOCK_ABI_H__
+#define __VMCLOCK_ABI_H__
+
+#include <linux/types.h>
+
+struct vmclock_abi {
+	/* CONSTANT FIELDS */
+	__le32 magic;
+#define VMCLOCK_MAGIC	0x4b4c4356 /* "VCLK" */
+	__le32 size;		/* Size of region containing this structure */
+	__le16 version;		/* 1 */
+	__u8 counter_id;	/* Matches VIRTIO_RTC_COUNTER_xxx except INVALID */
+#define VMCLOCK_COUNTER_ARM_VCNT	0
+#define VMCLOCK_COUNTER_X86_TSC		1
+#define VMCLOCK_COUNTER_INVALID		0xff
+	__u8 time_type;		/* Matches VIRTIO_RTC_TYPE_xxx */
+#define VMCLOCK_TIME_UTC			0	/* Since 1970-01-01 00:00:00z */
+#define VMCLOCK_TIME_TAI			1	/* Since 1970-01-01 00:00:00z */
+#define VMCLOCK_TIME_MONOTONIC			2	/* Since undefined epoch */
+#define VMCLOCK_TIME_INVALID_SMEARED		3	/* Not supported */
+#define VMCLOCK_TIME_INVALID_MAYBE_SMEARED	4	/* Not supported */
+
+	/* NON-CONSTANT FIELDS PROTECTED BY SEQCOUNT LOCK */
+	__le32 seq_count;	/* Low bit means an update is in progress */
+	/*
+	 * This field changes to another non-repeating value when the CPU
+	 * counter is disrupted, for example on live migration. This lets
+	 * the guest know that it should discard any calibration it has
+	 * performed of the counter against external sources (NTP/PTP/etc.).
+	 */
+	__le64 disruption_marker;
+	__le64 flags;
+	/* Indicates that the tai_offset_sec field is valid */
+#define VMCLOCK_FLAG_TAI_OFFSET_VALID		(1 << 0)
+	/*
+	 * Optionally used to notify guests of pending maintenance events.
+	 * A guest which provides latency-sensitive services may wish to
+	 * remove itself from service if an event is coming up. Two flags
+	 * indicate the approximate imminence of the event.
+	 */
+#define VMCLOCK_FLAG_DISRUPTION_SOON		(1 << 1) /* About a day */
+#define VMCLOCK_FLAG_DISRUPTION_IMMINENT	(1 << 2) /* About an hour */
+#define VMCLOCK_FLAG_PERIOD_ESTERROR_VALID	(1 << 3)
+#define VMCLOCK_FLAG_PERIOD_MAXERROR_VALID	(1 << 4)
+#define VMCLOCK_FLAG_TIME_ESTERROR_VALID	(1 << 5)
+#define VMCLOCK_FLAG_TIME_MAXERROR_VALID	(1 << 6)
+	/*
+	 * If the MONOTONIC flag is set then (other than leap seconds) it is
+	 * guaranteed that the time calculated according to this structure at
+	 * any given moment shall never appear to be later than the time
+	 * calculated via the structure at any *later* moment.
+	 *
+	 * In particular, a timestamp based on a counter reading taken
+	 * immediately after setting the low bit of seq_count (and the
+	 * associated memory barrier), using the previously-valid time and
+	 * period fields, shall never be later than a timestamp based on
+	 * a counter reading taken immediately before *clearing* the low
+	 * bit again after the update, using the about-to-be-valid fields.
+	 */
+#define VMCLOCK_FLAG_TIME_MONOTONIC		(1 << 7)
+
+	__u8 pad[2];
+	__u8 clock_status;
+#define VMCLOCK_STATUS_UNKNOWN		0
+#define VMCLOCK_STATUS_INITIALIZING	1
+#define VMCLOCK_STATUS_SYNCHRONIZED	2
+#define VMCLOCK_STATUS_FREERUNNING	3
+#define VMCLOCK_STATUS_UNRELIABLE	4
+
+	/*
+	 * The time exposed through this device is never smeared. This field
+	 * corresponds to the 'subtype' field in virtio-rtc, which indicates
+	 * the smearing method. However, in this case it provides a *hint* to
+	 * the guest operating system, such that *if* the guest OS wants to
+	 * provide its users with an alternative clock which does not follow
+	 * UTC, it may do so in a fashion consistent with the other systems
+	 * in the nearby environment.
+	 */
+	__u8 leap_second_smearing_hint;	/* Matches VIRTIO_RTC_SUBTYPE_xxx */
+#define VMCLOCK_SMEARING_STRICT		0
+#define VMCLOCK_SMEARING_NOON_LINEAR	1
+#define VMCLOCK_SMEARING_UTC_SLS	2
+	__le16 tai_offset_sec;		/* Actually two's complement signed */
+	__u8 leap_indicator;
+	/*
+	 * This field is based on the VIRTIO_RTC_LEAP_xxx values as defined
+	 * in the current draft of virtio-rtc, but since smearing cannot be
+	 * used with the shared memory device, some values are not used.
+	 *
+	 * The _POST_POS and _POST_NEG values allow the guest to perform
+	 * its own smearing during the day or so after a leap second when
+	 * such smearing may need to continue being applied for a leap
+	 * second which is now theoretically "historical".
+	 */
+#define VMCLOCK_LEAP_NONE	0x00	/* No known nearby leap second */
+#define VMCLOCK_LEAP_PRE_POS	0x01	/* Positive leap second at EOM */
+#define VMCLOCK_LEAP_PRE_NEG	0x02	/* Negative leap second at EOM */
+#define VMCLOCK_LEAP_POS	0x03	/* Set during 23:59:60 second */
+#define VMCLOCK_LEAP_POST_POS	0x04
+#define VMCLOCK_LEAP_POST_NEG	0x05
+
+	/* Bit shift for counter_period_frac_sec and its error rate */
+	__u8 counter_period_shift;
+
+	/*
+	 * Paired values of counter and UTC at a given point in time.
+	 */
+	__le64 counter_value;
+
+	/*
+	 * Counter period, and error margin of same. The unit of these
+	 * fields is 1/2^(64 + counter_period_shift) of a second.
+	 */
+	__le64 counter_period_frac_sec;
+	__le64 counter_period_esterror_rate_frac_sec;
+	__le64 counter_period_maxerror_rate_frac_sec;
+
+	/*
+	 * Time according to time_type field above.
+	 */
+	__le64 time_sec;		/* Seconds since time_type epoch */
+	__le64 time_frac_sec;		/* Units of 1/2^64 of a second */
+	__le64 time_esterror_nanosec;
+	__le64 time_maxerror_nanosec;
+};
+
+#endif /* __VMCLOCK_ABI_H__ */
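The seq_count field implements the usual vDSO-style seqcount protocol: the
reader retries while the low bit is set or the count changes across its
reads. A hedged userspace sketch of a consistent read of disruption_marker
from an mmap()ed vmclock page; the endian.h/stdatomic.h helpers and the
fence placement are assumptions for illustration, not part of the ABI text
above.

#include <endian.h>
#include <stdatomic.h>
#include <stdint.h>
#include <linux/vmclock-abi.h>

static uint64_t vmclock_read_disruption(const volatile struct vmclock_abi *clk)
{
	uint32_t seq;
	uint64_t marker;

	do {
		/* Mask off the update-in-progress bit; if it was set, the
		 * re-read below will not match and we retry. */
		seq = le32toh(clk->seq_count) & ~1U;
		atomic_thread_fence(memory_order_acquire);
		marker = le64toh(clk->disruption_marker);
		atomic_thread_fence(memory_order_acquire);
	} while (le32toh(clk->seq_count) != seq);

	return marker;
}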
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index 9463db2dfa9d..9854f9cff3c6 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -11,6 +11,7 @@
  */
 
 #include <linux/libc-compat.h>
+#include <linux/types.h>
 
 #ifndef _UAPI_LINUX_XATTR_H
 #define _UAPI_LINUX_XATTR_H
@@ -20,6 +21,12 @@
 #define XATTR_CREATE	0x1	/* set value, fail if attr already exists */
 #define XATTR_REPLACE	0x2	/* set value, fail if attr does not exist */
 
+
+struct xattr_args {
+	__aligned_u64 __user value;
+	__u32 size;
+	__u32 flags;
+};
 #endif
 
 /* Namespaces */
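struct xattr_args is the argument block for the new *xattrat() syscall
family. A hedged userspace sketch, assuming the six-argument setxattrat()
convention from the syscall series (dirfd, pathname, AT_* flags, attribute
name, args block, args size); there is no libc wrapper yet, so raw
syscall(2) is used, and __NR_setxattrat must come from sufficiently new
installed headers.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/xattr.h>

static int set_user_comment(int dfd, const char *path, const char *text)
{
	struct xattr_args args = {
		.value = (uintptr_t)text,	/* pointer passed as a u64 */
		.size  = strlen(text),
		.flags = XATTR_CREATE,		/* fail if it already exists */
	};

	return syscall(__NR_setxattrat, dfd, path, 0 /* at_flags */,
		       "user.comment", &args, sizeof(args));
}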
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index f28701500714..d73a97e3030a 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -322,6 +322,7 @@ enum xfrm_attr_type_t {
 	XFRMA_MTIMER_THRESH,		/* __u32 in seconds for input SA */
 	XFRMA_SA_DIR,			/* __u8 */
 	XFRMA_NAT_KEEPALIVE_INTERVAL,	/* __u32 in seconds for NAT keepalive */
+	XFRMA_SA_PCPU,			/* __u32 */
 	__XFRMA_MAX
 
 #define XFRMA_OUTPUT_MARK XFRMA_SET_MARK	/* Compatibility */
@@ -437,6 +438,7 @@ struct xfrm_userpolicy_info {
 #define XFRM_POLICY_LOCALOK	1	/* Allow user to override global policy */
 	/* Automatically expand selector to include matching ICMP payloads. */
 #define XFRM_POLICY_ICMP	2
+#define XFRM_POLICY_CPU_ACQUIRE	4
 	__u8 share;
 };
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index d689b8b34189..11b94b0b035b 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -95,7 +95,8 @@ struct efa_ibv_create_qp {
 	__u32 sq_ring_size; /* bytes */
 	__u32 driver_qp_type;
 	__u16 flags;
-	__u8 reserved_90[6];
+	__u8 sl;
+	__u8 reserved_98[5];
 };
 
 struct efa_ibv_create_qp_resp {
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index d4f6a36dffb0..8a6ad6c6841c 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -252,6 +252,7 @@ enum mlx5_ib_query_dev_resp_flags {
 	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD	= 1 << 1,
 	MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE	= 1 << 2,
 	MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
+	MLX5_IB_QUERY_DEV_RESP_FLAGS_OOO_DP = 1 << 4,
 };
 
 enum mlx5_ib_tunnel_offloads {
@@ -439,6 +440,10 @@ struct mlx5_ib_burst_info {
 	__u16       reserved;
 };
 
+enum mlx5_ib_modify_qp_mask {
+	MLX5_IB_MODIFY_QP_OOO_DP = 1 << 0,
+};
+
 struct mlx5_ib_modify_qp {
 	__u32			   comp_mask;
 	struct mlx5_ib_burst_info  burst_info;
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index 39be09c0ffbb..9f9cf20c1cd8 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -638,6 +638,8 @@ enum rdma_nl_notify_event_type {
 	RDMA_UNREGISTER_EVENT,
 	RDMA_NETDEV_ATTACH_EVENT,
 	RDMA_NETDEV_DETACH_EVENT,
+	RDMA_RENAME_EVENT,
+	RDMA_NETDEV_RENAME_EVENT,
 };
 
 #endif /* _UAPI_RDMA_NETLINK_H */
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index d185957f3fe0..d62eb93af0ed 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -14,7 +14,7 @@
 
 #include <sound/compress_params.h>
 
-#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 2, 0)
+#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 3, 0)
 
 /**
  * struct snd_compressed_buffer - compressed buffer
  * @fragment_size: size of buffer fragment in bytes
@@ -68,7 +68,8 @@ struct snd_compr_avail {
 
 enum snd_compr_direction {
 	SND_COMPRESS_PLAYBACK = 0,
-	SND_COMPRESS_CAPTURE
+	SND_COMPRESS_CAPTURE,
+	SND_COMPRESS_ACCEL
 };
 
 /**
@@ -127,6 +128,59 @@ struct snd_compr_metadata {
 	__u32 value[8];
 } __attribute__((packed, aligned(4)));
 
+/* flags for struct snd_compr_task */
+#define SND_COMPRESS_TFLG_NEW_STREAM	(1<<0)	/* mark for the new stream data */
+
+/**
+ * struct snd_compr_task - task primitive for non-realtime operation
+ * @seqno: sequence number (task identifier)
+ * @origin_seqno: previous sequence number (task identifier) - for reuse
+ * @input_fd: data input file descriptor (dma-buf)
+ * @output_fd: data output file descriptor (dma-buf)
+ * @input_size: filled data in bytes (from caller, must not exceed fragment size)
+ * @flags: see SND_COMPRESS_TFLG_* defines
+ * @reserved: reserved for future extension
+ */
+struct snd_compr_task {
+	__u64 seqno;
+	__u64 origin_seqno;
+	int input_fd;
+	int output_fd;
+	__u64 input_size;
+	__u32 flags;
+	__u8 reserved[16];
+} __attribute__((packed, aligned(4)));
+
+/**
+ * enum snd_compr_state - task state
+ * @SND_COMPRESS_TASK_STATE_IDLE: task is not queued
+ * @SND_COMPRESS_TASK_STATE_ACTIVE: task is in the queue
+ * @SND_COMPRESS_TASK_STATE_FINISHED: task was processed, output is available
+ */
+enum snd_compr_state {
+	SND_COMPRESS_TASK_STATE_IDLE = 0,
+	SND_COMPRESS_TASK_STATE_ACTIVE,
+	SND_COMPRESS_TASK_STATE_FINISHED
+};
+
+/**
+ * struct snd_compr_task_status - task status
+ * @seqno: sequence number (task identifier)
+ * @input_size: filled data in bytes (from user space)
+ * @output_size: filled data in bytes (from driver)
+ * @output_flags: reserved for future (all zeros - from driver)
+ * @state: actual task state (SND_COMPRESS_TASK_STATE_*)
+ * @reserved: reserved for future extension
+ */
+struct snd_compr_task_status {
+	__u64 seqno;
+	__u64 input_size;
+	__u64 output_size;
+	__u32 output_flags;
+	__u8 state;
+	__u8 reserved[15];
+} __attribute__((packed, aligned(4)));
+
 /*
  * compress path ioctl definitions
  * SNDRV_COMPRESS_GET_CAPS: Query capability of DSP
@@ -164,6 +218,14 @@ struct snd_compr_metadata {
 #define SNDRV_COMPRESS_DRAIN		_IO('C', 0x34)
 #define SNDRV_COMPRESS_NEXT_TRACK	_IO('C', 0x35)
 #define SNDRV_COMPRESS_PARTIAL_DRAIN	_IO('C', 0x36)
+
+
+#define SNDRV_COMPRESS_TASK_CREATE	_IOWR('C', 0x60, struct snd_compr_task)
+#define SNDRV_COMPRESS_TASK_FREE	_IOW('C', 0x61, __u64)
+#define SNDRV_COMPRESS_TASK_START	_IOWR('C', 0x62, struct snd_compr_task)
+#define SNDRV_COMPRESS_TASK_STOP	_IOW('C', 0x63, __u64)
+#define SNDRV_COMPRESS_TASK_STATUS	_IOWR('C', 0x68, struct snd_compr_task_status)
+
 /*
  * TODO
  * 1. add mmap support
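A hedged userspace sketch of one task round-trip on an SND_COMPRESS_ACCEL
stream. It assumes, as the accel design suggests, that
SNDRV_COMPRESS_TASK_CREATE allocates the input/output dma-buf pair and
returns the fds together with seqno; filling the input buffer via mmap() of
task.input_fd is elided, and error handling is minimal.

#include <string.h>
#include <sys/ioctl.h>
#include <sound/compress_offload.h>

/* 'fd' is an open accel compress stream; 'bytes' is how much input was
 * written into the input dma-buf (must not exceed the fragment size). */
static int run_one_task(int fd, __u64 bytes)
{
	struct snd_compr_task task;
	struct snd_compr_task_status status;

	memset(&task, 0, sizeof(task));
	if (ioctl(fd, SNDRV_COMPRESS_TASK_CREATE, &task) < 0)
		return -1;
	/* assumed: driver has filled task.seqno, task.input_fd, task.output_fd */

	/* ... mmap(task.input_fd), copy 'bytes' of input data, munmap ... */

	task.input_size = bytes;
	if (ioctl(fd, SNDRV_COMPRESS_TASK_START, &task) < 0)
		return -1;

	memset(&status, 0, sizeof(status));
	status.seqno = task.seqno;
	if (ioctl(fd, SNDRV_COMPRESS_TASK_STATUS, &status) < 0)
		return -1;

	return status.state == SND_COMPRESS_TASK_STATE_FINISHED ? 0 : -1;
}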
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 3f68ae3e4330..d7aca9e61684 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -299,6 +299,8 @@ struct ufs_pwr_mode_info {
  * @max_num_rtt: maximum RTT supported by the host
  * @init: called when the driver is initialized
  * @exit: called to cleanup everything done in init
+ * @set_dma_mask: For setting a DMA mask other than the one indicated by the
+ *	64AS capability bit.
  * @get_ufs_hci_version: called to get UFS HCI version
  * @clk_scale_notify: notifies that clks are scaled up/down
  * @setup_clocks: called before touching any of the controller registers
@@ -341,6 +343,7 @@ struct ufs_hba_variant_ops {
 	int	(*init)(struct ufs_hba *);
 	void	(*exit)(struct ufs_hba *);
 	u32	(*get_ufs_hci_version)(struct ufs_hba *);
+	int	(*set_dma_mask)(struct ufs_hba *);
 	int	(*clk_scale_notify)(struct ufs_hba *, bool,
 				    enum ufs_notify_change_status);
 	int	(*setup_clocks)(struct ufs_hba *, bool,
@@ -383,6 +386,7 @@ struct ufs_hba_variant_ops {
 	int	(*get_outstanding_cqs)(struct ufs_hba *hba,
 				       unsigned long *ocqs);
 	int	(*config_esi)(struct ufs_hba *hba);
+	void	(*config_scsi_dev)(struct scsi_device *sdev);
 };
 
 /* clock gating state */
@@ -625,12 +629,6 @@ enum ufshcd_quirks {
 
 	/*
 	 * This quirk needs to be enabled if the host controller has
-	 * 64-bit addressing supported capability but it doesn't work.
-	 */
-	UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS = 1 << 17,
-
-	/*
-	 * This quirk needs to be enabled if the host controller has
 	 * auto-hibernate capability but it's FASTAUTO only.
 	 */
 	UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18,
@@ -877,9 +875,10 @@ enum ufshcd_mcq_opr {
 * @tmf_tag_set: TMF tag set.
 * @tmf_queue: Used to allocate TMF tags.
 * @tmf_rqs: array with pointers to TMF requests while these are in progress.
- * @active_uic_cmd: handle of active UIC command
- * @uic_cmd_mutex: mutex for UIC command
- * @uic_async_done: completion used during UIC processing
+ * @active_uic_cmd: pointer to active UIC command.
+ * @uic_cmd_mutex: mutex used for serializing UIC command processing.
+ * @uic_async_done: completion used to wait for power mode or hibernation state
+ *	changes.
 * @ufshcd_state: UFSHCD state
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
@@ -927,7 +926,6 @@ enum ufshcd_mcq_opr {
 * @wb_mutex: used to serialize devfreq and sysfs write booster toggling
 * @clk_scaling_lock: used to serialize device commands and clock scaling
 * @desc_size: descriptor sizes reported by device
- * @scsi_block_reqs_cnt: reference counting for scsi block requests
 * @bsg_dev: struct device associated with the BSG queue
 * @bsg_queue: BSG queue associated with the UFS controller
 * @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
@@ -1088,7 +1086,6 @@ struct ufs_hba {
 	struct mutex wb_mutex;
 	struct rw_semaphore clk_scaling_lock;
 
-	atomic_t scsi_block_reqs_cnt;
 
 	struct device bsg_dev;
 	struct request_queue *bsg_queue;
@@ -1320,8 +1317,8 @@ void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 				       struct ufs_hw_queue *hwq);
 void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
-void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
 void ufshcd_mcq_enable(struct ufs_hba *hba);
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
 void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
 
 int ufshcd_opp_config_clks(struct device *dev, struct opp_table *opp_table,
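With UFSHCD_QUIRK_BROKEN_64BIT_ADDRESS removed, a controller whose 64AS
capability bit cannot be trusted instead supplies the new ->set_dma_mask()
variant op. A minimal sketch of such an implementation; the 36-bit width is
an arbitrary example, not taken from the patch.

static int example_ufs_set_dma_mask(struct ufs_hba *hba)
{
	/* Force a narrower mask than the 64AS capability bit advertises. */
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(36));
}

static const struct ufs_hba_variant_ops example_ufs_vops = {
	.set_dma_mask = example_ufs_set_dma_mask,
};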
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index b85f24cac3f5..d967baa0cd0c 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -19,10 +19,10 @@
 #include <vdso/time32.h>
 #include <vdso/time64.h>
 
-#ifdef CONFIG_ARCH_HAS_VDSO_DATA
-#include <asm/vdso/data.h>
+#ifdef CONFIG_ARCH_HAS_VDSO_TIME_DATA
+#include <asm/vdso/time_data.h>
 #else
-struct arch_vdso_data {};
+struct arch_vdso_time_data {};
 #endif
 
 #define VDSO_BASES (CLOCK_TAI + 1)
@@ -114,7 +114,7 @@ struct vdso_data {
 	u32 hrtimer_res;
 	u32 __unused;
 
-	struct arch_vdso_data	arch_data;
+	struct arch_vdso_time_data arch_data;
 };
 
 /**
diff --git a/include/vdso/page.h b/include/vdso/page.h
new file mode 100644
index 000000000000..710ae2414e68
--- /dev/null
+++ b/include/vdso/page.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_PAGE_H
+#define __VDSO_PAGE_H
+
+#include <uapi/linux/const.h>
+
+/*
+ * PAGE_SHIFT determines the page size.
+ *
+ * Note: This definition is required because PAGE_SHIFT is used
+ * in several places throughout the codebase.
+ */
+#define PAGE_SHIFT	CONFIG_PAGE_SHIFT
+
+#define PAGE_SIZE	(_AC(1,UL) << CONFIG_PAGE_SHIFT)
+
+#if !defined(CONFIG_64BIT)
+/*
+ * Applies only to 32-bit architectures.
+ *
+ * Subtle: (1 << CONFIG_PAGE_SHIFT) is an int, not an unsigned long.
+ * So if we assign PAGE_MASK to a larger type it gets extended the
+ * way we want (i.e. with 1s in the high bits) while masking a
+ * 64-bit value such as phys_addr_t.
+ */
+#define PAGE_MASK	(~((1 << CONFIG_PAGE_SHIFT) - 1))
+#else
+#define PAGE_MASK	(~(PAGE_SIZE - 1))
+#endif
+
+#endif	/* __VDSO_PAGE_H */
diff --git a/include/video/omapfb_dss.h b/include/video/omapfb_dss.h
index a8c0c3eeeb5b..d133190e3143 100644
--- a/include/video/omapfb_dss.h
+++ b/include/video/omapfb_dss.h
@@ -811,14 +811,6 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
 	return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
 }
 
-struct device_node *
-omapdss_of_get_next_port(const struct device_node *parent,
-			 struct device_node *prev);
-
-struct device_node *
-omapdss_of_get_next_endpoint(const struct device_node *parent,
-			     struct device_node *prev);
-
 struct omap_dss_device *
 omapdss_of_find_source_for_first_ep(struct device_node *node);
 #else
diff --git a/include/video/platform_lcd.h b/include/video/platform_lcd.h
index 6a95184a28c1..2bdf46519298 100644
--- a/include/video/platform_lcd.h
+++ b/include/video/platform_lcd.h
@@ -8,11 +8,8 @@
  */
 
 struct plat_lcd_data;
-struct fb_info;
 
 struct plat_lcd_data {
	int	(*probe)(struct plat_lcd_data *);
	void	(*set_power)(struct plat_lcd_data *, unsigned int power);
-	int	(*match_fb)(struct plat_lcd_data *, struct fb_info *);
 };
-
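The 32-bit PAGE_MASK subtlety in vdso/page.h above is easiest to see with
concrete types: a 32-bit unsigned-long mask zero-extends when widened to a
64-bit phys_addr_t and wrongly clears the high bits, while the int form
sign-extends with 1s. A hedged standalone illustration (12-bit shift and the
address value are arbitrary; build with 32-bit longs, e.g. -m32, for the
first form to model the problem):

#include <stdint.h>
#include <stdio.h>

#define SHIFT 12

int main(void)
{
	uint32_t mask_ul  = ~((UINT32_C(1) << SHIFT) - 1); /* ~(PAGE_SIZE - 1) with 32-bit UL */
	int32_t  mask_int = ~((1 << SHIFT) - 1);           /* the vdso/page.h form */
	uint64_t phys = 0x1234567890ULL;                   /* physical address above 4 GiB */

	/* 0x34567000: high bits of the physical address are lost */
	printf("%#llx\n", (unsigned long long)(phys & mask_ul));
	/* 0x1234567000: sign extension preserves the high bits */
	printf("%#llx\n", (unsigned long long)(phys & mask_int));
	return 0;
}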