Diffstat (limited to 'include/linux')
333 files changed, 9818 insertions, 3243 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 72e4f7fd268c..974d497a897d 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -249,7 +249,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
 /* the following numa functions are architecture-dependent */
 void acpi_numa_slit_init (struct acpi_table_slit *slit);
 
-#if defined(CONFIG_X86) || defined(CONFIG_IA64)
+#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
 void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
 #else
 static inline void
@@ -1380,13 +1380,11 @@ static inline int find_acpi_cpu_cache_topology(unsigned int cpu, int level)
 #endif
 
 #ifdef CONFIG_ACPI
-extern int acpi_platform_notify(struct device *dev, enum kobject_action action);
+extern void acpi_device_notify(struct device *dev);
+extern void acpi_device_notify_remove(struct device *dev);
 #else
-static inline int
-acpi_platform_notify(struct device *dev, enum kobject_action action)
-{
-	return 0;
-}
+static inline void acpi_device_notify(struct device *dev) { }
+static inline void acpi_device_notify_remove(struct device *dev) { }
 #endif
 
 #endif	/*_LINUX_ACPI_H*/
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index ed1d3ffd5b9d..8dd57c3a99e9 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -77,9 +77,8 @@
 	__ret; \
 })
 
-#include <linux/atomic-arch-fallback.h>
-#include <asm-generic/atomic-instrumented.h>
-
-#include <asm-generic/atomic-long.h>
+#include <linux/atomic/atomic-arch-fallback.h>
+#include <linux/atomic/atomic-long.h>
+#include <linux/atomic/atomic-instrumented.h>
 
 #endif /* _LINUX_ATOMIC_H */
diff --git a/include/linux/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index a3dba31df01e..a3dba31df01e 100644
--- a/include/linux/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
new file mode 100644
index 000000000000..a0f654370da3
--- /dev/null
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -0,0 +1,1915 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Generated by scripts/atomic/gen-atomic-instrumented.sh
+// DO NOT MODIFY THIS FILE DIRECTLY
+
+/*
+ * This file provides wrappers with KASAN instrumentation for atomic operations.
+ * To use this functionality an arch's atomic.h file needs to define all
+ * atomic operations with arch_ prefix (e.g. arch_atomic_read()) and include
+ * this file at the end. This file provides atomic_read() that forwards to
+ * arch_atomic_read() for actual atomic operation.
+ * Note: if an arch atomic operation is implemented by means of other atomic
+ * operations (e.g. atomic_read()/atomic_cmpxchg() loop), then it needs to use
+ * arch_ variants (i.e. arch_atomic_read()/arch_atomic_cmpxchg()) to avoid
+ * double instrumentation.
+ */ +#ifndef _LINUX_ATOMIC_INSTRUMENTED_H +#define _LINUX_ATOMIC_INSTRUMENTED_H + +#include <linux/build_bug.h> +#include <linux/compiler.h> +#include <linux/instrumented.h> + +static __always_inline int +atomic_read(const atomic_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_read(v); +} + +static __always_inline int +atomic_read_acquire(const atomic_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_read_acquire(v); +} + +static __always_inline void +atomic_set(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_set(v, i); +} + +static __always_inline void +atomic_set_release(atomic_t *v, int i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_set_release(v, i); +} + +static __always_inline void +atomic_add(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_add(i, v); +} + +static __always_inline int +atomic_add_return(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_return(i, v); +} + +static __always_inline int +atomic_add_return_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_return_acquire(i, v); +} + +static __always_inline int +atomic_add_return_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_return_release(i, v); +} + +static __always_inline int +atomic_add_return_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_return_relaxed(i, v); +} + +static __always_inline int +atomic_fetch_add(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add(i, v); +} + +static __always_inline int +atomic_fetch_add_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add_acquire(i, v); +} + +static __always_inline int +atomic_fetch_add_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add_release(i, v); +} + +static __always_inline int +atomic_fetch_add_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic_sub(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_sub(i, v); +} + +static __always_inline int +atomic_sub_return(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_return(i, v); +} + +static __always_inline int +atomic_sub_return_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_return_acquire(i, v); +} + +static __always_inline int +atomic_sub_return_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_return_release(i, v); +} + +static __always_inline int +atomic_sub_return_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_return_relaxed(i, v); +} + +static __always_inline int +atomic_fetch_sub(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_sub(i, v); +} + +static __always_inline int +atomic_fetch_sub_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_acquire(i, v); +} + +static __always_inline int +atomic_fetch_sub_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, 
sizeof(*v)); + return arch_atomic_fetch_sub_release(i, v); +} + +static __always_inline int +atomic_fetch_sub_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_inc(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_inc(v); +} + +static __always_inline int +atomic_inc_return(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return(v); +} + +static __always_inline int +atomic_inc_return_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return_acquire(v); +} + +static __always_inline int +atomic_inc_return_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return_release(v); +} + +static __always_inline int +atomic_inc_return_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_return_relaxed(v); +} + +static __always_inline int +atomic_fetch_inc(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc(v); +} + +static __always_inline int +atomic_fetch_inc_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_acquire(v); +} + +static __always_inline int +atomic_fetch_inc_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_release(v); +} + +static __always_inline int +atomic_fetch_inc_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_dec(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_dec(v); +} + +static __always_inline int +atomic_dec_return(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return(v); +} + +static __always_inline int +atomic_dec_return_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return_acquire(v); +} + +static __always_inline int +atomic_dec_return_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return_release(v); +} + +static __always_inline int +atomic_dec_return_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_return_relaxed(v); +} + +static __always_inline int +atomic_fetch_dec(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec(v); +} + +static __always_inline int +atomic_fetch_dec_acquire(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_acquire(v); +} + +static __always_inline int +atomic_fetch_dec_release(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_release(v); +} + +static __always_inline int +atomic_fetch_dec_relaxed(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_and(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_and(i, v); +} + +static __always_inline int +atomic_fetch_and(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and(i, v); +} + +static __always_inline int +atomic_fetch_and_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_fetch_and_acquire(i, v); +} + +static __always_inline int +atomic_fetch_and_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and_release(i, v); +} + +static __always_inline int +atomic_fetch_and_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_andnot(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_andnot(i, v); +} + +static __always_inline int +atomic_fetch_andnot(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot(i, v); +} + +static __always_inline int +atomic_fetch_andnot_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_acquire(i, v); +} + +static __always_inline int +atomic_fetch_andnot_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_release(i, v); +} + +static __always_inline int +atomic_fetch_andnot_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_or(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_or(i, v); +} + +static __always_inline int +atomic_fetch_or(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or(i, v); +} + +static __always_inline int +atomic_fetch_or_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or_acquire(i, v); +} + +static __always_inline int +atomic_fetch_or_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or_release(i, v); +} + +static __always_inline int +atomic_fetch_or_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_xor(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_xor(i, v); +} + +static __always_inline int +atomic_fetch_xor(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor(i, v); +} + +static __always_inline int +atomic_fetch_xor_acquire(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_acquire(i, v); +} + +static __always_inline int +atomic_fetch_xor_release(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_release(i, v); +} + +static __always_inline int +atomic_fetch_xor_relaxed(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_xor_relaxed(i, v); +} + +static __always_inline int +atomic_xchg(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg(v, i); +} + +static __always_inline int +atomic_xchg_acquire(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg_acquire(v, i); +} + +static __always_inline int +atomic_xchg_release(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg_release(v, i); +} + +static __always_inline int +atomic_xchg_relaxed(atomic_t *v, int i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_xchg_relaxed(v, i); +} + +static __always_inline 
int +atomic_cmpxchg(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg(v, old, new); +} + +static __always_inline int +atomic_cmpxchg_acquire(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_acquire(v, old, new); +} + +static __always_inline int +atomic_cmpxchg_release(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_release(v, old, new); +} + +static __always_inline int +atomic_cmpxchg_relaxed(atomic_t *v, int old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg_release(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_sub_and_test(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_sub_and_test(i, v); +} + +static __always_inline bool +atomic_dec_and_test(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_and_test(v); +} + +static __always_inline bool +atomic_inc_and_test(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_and_test(v); +} + +static __always_inline bool +atomic_add_negative(int i, atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_negative(i, v); +} + +static __always_inline int +atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_add_unless(atomic_t *v, int a, int u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_add_unless(v, a, u); +} + +static __always_inline bool +atomic_inc_not_zero(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_not_zero(v); +} + +static __always_inline bool +atomic_inc_unless_negative(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_inc_unless_negative(v); +} + +static __always_inline bool +atomic_dec_unless_positive(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_unless_positive(v); +} + +static __always_inline int +atomic_dec_if_positive(atomic_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_dec_if_positive(v); +} + +static __always_inline s64 +atomic64_read(const atomic64_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic64_read(v); +} + +static __always_inline s64 
+atomic64_read_acquire(const atomic64_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic64_read_acquire(v); +} + +static __always_inline void +atomic64_set(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_set(v, i); +} + +static __always_inline void +atomic64_set_release(atomic64_t *v, s64 i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic64_set_release(v, i); +} + +static __always_inline void +atomic64_add(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_add(i, v); +} + +static __always_inline s64 +atomic64_add_return(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return(i, v); +} + +static __always_inline s64 +atomic64_add_return_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return_acquire(i, v); +} + +static __always_inline s64 +atomic64_add_return_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return_release(i, v); +} + +static __always_inline s64 +atomic64_add_return_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_return_relaxed(i, v); +} + +static __always_inline s64 +atomic64_fetch_add(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic64_sub(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_sub(i, v); +} + +static __always_inline s64 +atomic64_sub_return(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return(i, v); +} + +static __always_inline s64 +atomic64_sub_return_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return_acquire(i, v); +} + +static __always_inline s64 +atomic64_sub_return_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return_release(i, v); +} + +static __always_inline s64 +atomic64_sub_return_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_return_relaxed(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_sub_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_sub_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic64_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic64_inc(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_inc(v); +} + +static __always_inline s64 +atomic64_inc_return(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return(v); +} + +static __always_inline s64 +atomic64_inc_return_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return_acquire(v); +} + +static __always_inline s64 +atomic64_inc_return_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return_release(v); +} + +static __always_inline s64 +atomic64_inc_return_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_return_relaxed(v); +} + +static __always_inline s64 +atomic64_fetch_inc(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc(v); +} + +static __always_inline s64 +atomic64_fetch_inc_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_acquire(v); +} + +static __always_inline s64 +atomic64_fetch_inc_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_release(v); +} + +static __always_inline s64 +atomic64_fetch_inc_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic64_dec(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_dec(v); +} + +static __always_inline s64 +atomic64_dec_return(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return(v); +} + +static __always_inline s64 +atomic64_dec_return_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return_acquire(v); +} + +static __always_inline s64 +atomic64_dec_return_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return_release(v); +} + +static __always_inline s64 +atomic64_dec_return_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_return_relaxed(v); +} + +static __always_inline s64 +atomic64_fetch_dec(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec(v); +} + +static __always_inline s64 +atomic64_fetch_dec_acquire(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_acquire(v); +} + +static __always_inline s64 +atomic64_fetch_dec_release(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_release(v); +} + +static __always_inline s64 +atomic64_fetch_dec_relaxed(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic64_and(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_and(i, v); +} + +static __always_inline s64 +atomic64_fetch_and(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and(i, v); +} + +static __always_inline s64 +atomic64_fetch_and_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_acquire(i, v); +} + +static __always_inline s64 
+atomic64_fetch_and_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_and_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic64_andnot(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_andnot(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic64_or(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_or(i, v); +} + +static __always_inline s64 +atomic64_fetch_or(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or(i, v); +} + +static __always_inline s64 +atomic64_fetch_or_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_or_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_or_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic64_xor(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic64_xor(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor_acquire(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_acquire(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor_release(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_release(i, v); +} + +static __always_inline s64 +atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_xor_relaxed(i, v); +} + +static __always_inline s64 +atomic64_xchg(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg(v, i); +} + +static __always_inline s64 +atomic64_xchg_acquire(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg_acquire(v, i); +} + +static __always_inline s64 +atomic64_xchg_release(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_xchg_release(v, i); +} + +static __always_inline s64 +atomic64_xchg_relaxed(atomic64_t *v, s64 i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic64_xchg_relaxed(v, i); +} + +static __always_inline s64 +atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg(v, old, new); +} + +static __always_inline s64 +atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_acquire(v, old, new); +} + +static __always_inline s64 +atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_release(v, old, new); +} + +static __always_inline s64 +atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic64_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic64_sub_and_test(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_sub_and_test(i, v); +} + +static __always_inline bool +atomic64_dec_and_test(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_and_test(v); +} + +static __always_inline bool +atomic64_inc_and_test(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_and_test(v); +} + +static __always_inline bool +atomic64_add_negative(s64 i, atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_negative(i, v); +} + +static __always_inline s64 +atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic64_add_unless(atomic64_t *v, s64 a, s64 u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_add_unless(v, a, u); +} + +static __always_inline bool +atomic64_inc_not_zero(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_not_zero(v); +} + +static __always_inline bool +atomic64_inc_unless_negative(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_inc_unless_negative(v); +} + +static __always_inline bool +atomic64_dec_unless_positive(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_unless_positive(v); +} + +static __always_inline s64 +atomic64_dec_if_positive(atomic64_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic64_dec_if_positive(v); +} + +static __always_inline 
long +atomic_long_read(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read(v); +} + +static __always_inline long +atomic_long_read_acquire(const atomic_long_t *v) +{ + instrument_atomic_read(v, sizeof(*v)); + return arch_atomic_long_read_acquire(v); +} + +static __always_inline void +atomic_long_set(atomic_long_t *v, long i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set(v, i); +} + +static __always_inline void +atomic_long_set_release(atomic_long_t *v, long i) +{ + instrument_atomic_write(v, sizeof(*v)); + arch_atomic_long_set_release(v, i); +} + +static __always_inline void +atomic_long_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_add(i, v); +} + +static __always_inline long +atomic_long_add_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return(i, v); +} + +static __always_inline long +atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_acquire(i, v); +} + +static __always_inline long +atomic_long_add_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_release(i, v); +} + +static __always_inline long +atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_add(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add(i, v); +} + +static __always_inline long +atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_release(i, v); +} + +static __always_inline long +atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_relaxed(i, v); +} + +static __always_inline void +atomic_long_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_sub(i, v); +} + +static __always_inline long +atomic_long_sub_return(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return(i, v); +} + +static __always_inline long +atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_acquire(i, v); +} + +static __always_inline long +atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_release(i, v); +} + +static __always_inline long +atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_return_relaxed(i, v); +} + +static __always_inline long +atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_long_fetch_sub_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_release(i, v); +} + +static __always_inline long +atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_sub_relaxed(i, v); +} + +static __always_inline void +atomic_long_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_inc(v); +} + +static __always_inline long +atomic_long_inc_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return(v); +} + +static __always_inline long +atomic_long_inc_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_acquire(v); +} + +static __always_inline long +atomic_long_inc_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_release(v); +} + +static __always_inline long +atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_inc(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc(v); +} + +static __always_inline long +atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_acquire(v); +} + +static __always_inline long +atomic_long_fetch_inc_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_release(v); +} + +static __always_inline long +atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_inc_relaxed(v); +} + +static __always_inline void +atomic_long_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_dec(v); +} + +static __always_inline long +atomic_long_dec_return(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return(v); +} + +static __always_inline long +atomic_long_dec_return_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_acquire(v); +} + +static __always_inline long +atomic_long_dec_return_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_release(v); +} + +static __always_inline long +atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_return_relaxed(v); +} + +static __always_inline long +atomic_long_fetch_dec(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec(v); +} + +static __always_inline long +atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_acquire(v); +} + +static __always_inline long +atomic_long_fetch_dec_release(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_dec_release(v); +} + +static __always_inline long +atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return 
arch_atomic_long_fetch_dec_relaxed(v); +} + +static __always_inline void +atomic_long_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and(i, v); +} + +static __always_inline long +atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_release(i, v); +} + +static __always_inline long +atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_and_relaxed(i, v); +} + +static __always_inline void +atomic_long_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_release(i, v); +} + +static __always_inline long +atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +atomic_long_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or(i, v); +} + +static __always_inline long +atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_release(i, v); +} + +static __always_inline long +atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_or_relaxed(i, v); +} + +static __always_inline void +atomic_long_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + arch_atomic_long_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_acquire(i, v); +} + +static __always_inline long +atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_release(i, v); +} + +static __always_inline long 
+atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_xor_relaxed(i, v); +} + +static __always_inline long +atomic_long_xchg(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg(v, i); +} + +static __always_inline long +atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_acquire(v, i); +} + +static __always_inline long +atomic_long_xchg_release(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_release(v, i); +} + +static __always_inline long +atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_xchg_relaxed(v, i); +} + +static __always_inline long +atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_release(v, old, new); +} + +static __always_inline long +atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_acquire(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_release(v, old, new); +} + +static __always_inline bool +atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + instrument_atomic_read_write(v, sizeof(*v)); + instrument_atomic_read_write(old, sizeof(*old)); + return arch_atomic_long_try_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_sub_and_test(i, v); +} + +static __always_inline bool +atomic_long_dec_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_and_test(v); +} + +static __always_inline bool +atomic_long_inc_and_test(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_and_test(v); +} + +static __always_inline bool +atomic_long_add_negative(long i, atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_negative(i, v); +} + +static __always_inline long +atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + 
instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_fetch_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_add_unless(v, a, u); +} + +static __always_inline bool +atomic_long_inc_not_zero(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_not_zero(v); +} + +static __always_inline bool +atomic_long_inc_unless_negative(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_inc_unless_negative(v); +} + +static __always_inline bool +atomic_long_dec_unless_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_unless_positive(v); +} + +static __always_inline long +atomic_long_dec_if_positive(atomic_long_t *v) +{ + instrument_atomic_read_write(v, sizeof(*v)); + return arch_atomic_long_dec_if_positive(v); +} + +#define xchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define xchg_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define xchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define xchg_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_acquire(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_release(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_relaxed(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \ +}) + +#define try_cmpxchg(ptr, oldp, ...) 
\ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg_acquire(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg_release(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define try_cmpxchg_relaxed(ptr, oldp, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + typeof(oldp) __ai_oldp = (oldp); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \ + arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \ +}) + +#define cmpxchg_local(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg64_local(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \ +}) + +#define sync_cmpxchg(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \ + arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \ +}) + +#define cmpxchg_double(ptr, ...) \ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \ +}) + + +#define cmpxchg_double_local(ptr, ...) 
\ +({ \ + typeof(ptr) __ai_ptr = (ptr); \ + instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \ + arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \ +}) + +#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */ +// 2a9553f0a9d5619f19151092df5cabbbf16ce835 diff --git a/include/linux/atomic/atomic-long.h b/include/linux/atomic/atomic-long.h new file mode 100644 index 000000000000..800b8c35992d --- /dev/null +++ b/include/linux/atomic/atomic-long.h @@ -0,0 +1,1014 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Generated by scripts/atomic/gen-atomic-long.sh +// DO NOT MODIFY THIS FILE DIRECTLY + +#ifndef _LINUX_ATOMIC_LONG_H +#define _LINUX_ATOMIC_LONG_H + +#include <linux/compiler.h> +#include <asm/types.h> + +#ifdef CONFIG_64BIT +typedef atomic64_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) +#define atomic_long_cond_read_acquire atomic64_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic64_cond_read_relaxed +#else +typedef atomic_t atomic_long_t; +#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) +#define atomic_long_cond_read_acquire atomic_cond_read_acquire +#define atomic_long_cond_read_relaxed atomic_cond_read_relaxed +#endif + +#ifdef CONFIG_64BIT + +static __always_inline long +arch_atomic_long_read(const atomic_long_t *v) +{ + return arch_atomic64_read(v); +} + +static __always_inline long +arch_atomic_long_read_acquire(const atomic_long_t *v) +{ + return arch_atomic64_read_acquire(v); +} + +static __always_inline void +arch_atomic_long_set(atomic_long_t *v, long i) +{ + arch_atomic64_set(v, i); +} + +static __always_inline void +arch_atomic_long_set_release(atomic_long_t *v, long i) +{ + arch_atomic64_set_release(v, i); +} + +static __always_inline void +arch_atomic_long_add(long i, atomic_long_t *v) +{ + arch_atomic64_add(i, v); +} + +static __always_inline long +arch_atomic_long_add_return(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_add_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_add_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_sub(long i, atomic_long_t *v) +{ + arch_atomic64_sub(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_release(i, v); +} + +static __always_inline long 
+arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_sub_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_inc(atomic_long_t *v) +{ + arch_atomic64_inc(v); +} + +static __always_inline long +arch_atomic_long_inc_return(atomic_long_t *v) +{ + return arch_atomic64_inc_return(v); +} + +static __always_inline long +arch_atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return arch_atomic64_inc_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_inc_return_release(atomic_long_t *v) +{ + return arch_atomic64_inc_return_release(v); +} + +static __always_inline long +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return arch_atomic64_inc_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return arch_atomic64_fetch_inc_relaxed(v); +} + +static __always_inline void +arch_atomic_long_dec(atomic_long_t *v) +{ + arch_atomic64_dec(v); +} + +static __always_inline long +arch_atomic_long_dec_return(atomic_long_t *v) +{ + return arch_atomic64_dec_return(v); +} + +static __always_inline long +arch_atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return arch_atomic64_dec_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_dec_return_release(atomic_long_t *v) +{ + return arch_atomic64_dec_return_release(v); +} + +static __always_inline long +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return arch_atomic64_dec_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return arch_atomic64_fetch_dec_relaxed(v); +} + +static __always_inline void +arch_atomic_long_and(long i, atomic_long_t *v) +{ + arch_atomic64_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_release(i, v); 
+} + +static __always_inline long +arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_and_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_andnot(long i, atomic_long_t *v) +{ + arch_atomic64_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_or(long i, atomic_long_t *v) +{ + arch_atomic64_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_or_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_xor(long i, atomic_long_t *v) +{ + arch_atomic64_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic64_fetch_xor_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_xchg(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_acquire(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_release(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return arch_atomic64_xchg_relaxed(v, i); +} + +static __always_inline long +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_release(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return arch_atomic64_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg(v, (s64 *)old, new); +} + 
+static __always_inline bool +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_acquire(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_release(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return arch_atomic64_try_cmpxchg_relaxed(v, (s64 *)old, new); +} + +static __always_inline bool +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return arch_atomic64_sub_and_test(i, v); +} + +static __always_inline bool +arch_atomic_long_dec_and_test(atomic_long_t *v) +{ + return arch_atomic64_dec_and_test(v); +} + +static __always_inline bool +arch_atomic_long_inc_and_test(atomic_long_t *v) +{ + return arch_atomic64_inc_and_test(v); +} + +static __always_inline bool +arch_atomic_long_add_negative(long i, atomic_long_t *v) +{ + return arch_atomic64_add_negative(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic64_fetch_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic64_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_inc_not_zero(atomic_long_t *v) +{ + return arch_atomic64_inc_not_zero(v); +} + +static __always_inline bool +arch_atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return arch_atomic64_inc_unless_negative(v); +} + +static __always_inline bool +arch_atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return arch_atomic64_dec_unless_positive(v); +} + +static __always_inline long +arch_atomic_long_dec_if_positive(atomic_long_t *v) +{ + return arch_atomic64_dec_if_positive(v); +} + +#else /* CONFIG_64BIT */ + +static __always_inline long +arch_atomic_long_read(const atomic_long_t *v) +{ + return arch_atomic_read(v); +} + +static __always_inline long +arch_atomic_long_read_acquire(const atomic_long_t *v) +{ + return arch_atomic_read_acquire(v); +} + +static __always_inline void +arch_atomic_long_set(atomic_long_t *v, long i) +{ + arch_atomic_set(v, i); +} + +static __always_inline void +arch_atomic_long_set_release(atomic_long_t *v, long i) +{ + arch_atomic_set_release(v, i); +} + +static __always_inline void +arch_atomic_long_add(long i, atomic_long_t *v) +{ + arch_atomic_add(i, v); +} + +static __always_inline long +arch_atomic_long_add_return(long i, atomic_long_t *v) +{ + return arch_atomic_add_return(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_release(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_add_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_add_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_release(i, v); +} + +static __always_inline long 
+arch_atomic_long_fetch_add_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_add_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_sub(long i, atomic_long_t *v) +{ + arch_atomic_sub(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_release(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_release(i, v); +} + +static __always_inline long +arch_atomic_long_sub_return_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_sub_return_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_sub_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_sub_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_inc(atomic_long_t *v) +{ + arch_atomic_inc(v); +} + +static __always_inline long +arch_atomic_long_inc_return(atomic_long_t *v) +{ + return arch_atomic_inc_return(v); +} + +static __always_inline long +arch_atomic_long_inc_return_acquire(atomic_long_t *v) +{ + return arch_atomic_inc_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_inc_return_release(atomic_long_t *v) +{ + return arch_atomic_inc_return_release(v); +} + +static __always_inline long +arch_atomic_long_inc_return_relaxed(atomic_long_t *v) +{ + return arch_atomic_inc_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc(atomic_long_t *v) +{ + return arch_atomic_fetch_inc(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_acquire(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_release(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_release(v); +} + +static __always_inline long +arch_atomic_long_fetch_inc_relaxed(atomic_long_t *v) +{ + return arch_atomic_fetch_inc_relaxed(v); +} + +static __always_inline void +arch_atomic_long_dec(atomic_long_t *v) +{ + arch_atomic_dec(v); +} + +static __always_inline long +arch_atomic_long_dec_return(atomic_long_t *v) +{ + return arch_atomic_dec_return(v); +} + +static __always_inline long +arch_atomic_long_dec_return_acquire(atomic_long_t *v) +{ + return arch_atomic_dec_return_acquire(v); +} + +static __always_inline long +arch_atomic_long_dec_return_release(atomic_long_t *v) +{ + return arch_atomic_dec_return_release(v); +} + +static __always_inline long +arch_atomic_long_dec_return_relaxed(atomic_long_t *v) +{ + return arch_atomic_dec_return_relaxed(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec(atomic_long_t *v) +{ + return arch_atomic_fetch_dec(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_acquire(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_acquire(v); +} + +static __always_inline long +arch_atomic_long_fetch_dec_release(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_release(v); +} + +static __always_inline long 
+arch_atomic_long_fetch_dec_relaxed(atomic_long_t *v) +{ + return arch_atomic_fetch_dec_relaxed(v); +} + +static __always_inline void +arch_atomic_long_and(long i, atomic_long_t *v) +{ + arch_atomic_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_and_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_and_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_andnot(long i, atomic_long_t *v) +{ + arch_atomic_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_andnot_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_andnot_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_or(long i, atomic_long_t *v) +{ + arch_atomic_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_or_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_or_relaxed(i, v); +} + +static __always_inline void +arch_atomic_long_xor(long i, atomic_long_t *v) +{ + arch_atomic_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_acquire(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_acquire(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_release(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_release(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v) +{ + return arch_atomic_fetch_xor_relaxed(i, v); +} + +static __always_inline long +arch_atomic_long_xchg(atomic_long_t *v, long i) +{ + return arch_atomic_xchg(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_acquire(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_acquire(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_release(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_release(v, i); +} + +static __always_inline long +arch_atomic_long_xchg_relaxed(atomic_long_t *v, long i) +{ + return arch_atomic_xchg_relaxed(v, i); +} + +static __always_inline long +arch_atomic_long_cmpxchg(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg(v, old, new); +} + +static __always_inline long 
+arch_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_acquire(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_release(v, old, new); +} + +static __always_inline long +arch_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new) +{ + return arch_atomic_cmpxchg_relaxed(v, old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_acquire(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_release(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_try_cmpxchg_relaxed(atomic_long_t *v, long *old, long new) +{ + return arch_atomic_try_cmpxchg_relaxed(v, (int *)old, new); +} + +static __always_inline bool +arch_atomic_long_sub_and_test(long i, atomic_long_t *v) +{ + return arch_atomic_sub_and_test(i, v); +} + +static __always_inline bool +arch_atomic_long_dec_and_test(atomic_long_t *v) +{ + return arch_atomic_dec_and_test(v); +} + +static __always_inline bool +arch_atomic_long_inc_and_test(atomic_long_t *v) +{ + return arch_atomic_inc_and_test(v); +} + +static __always_inline bool +arch_atomic_long_add_negative(long i, atomic_long_t *v) +{ + return arch_atomic_add_negative(i, v); +} + +static __always_inline long +arch_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic_fetch_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_add_unless(atomic_long_t *v, long a, long u) +{ + return arch_atomic_add_unless(v, a, u); +} + +static __always_inline bool +arch_atomic_long_inc_not_zero(atomic_long_t *v) +{ + return arch_atomic_inc_not_zero(v); +} + +static __always_inline bool +arch_atomic_long_inc_unless_negative(atomic_long_t *v) +{ + return arch_atomic_inc_unless_negative(v); +} + +static __always_inline bool +arch_atomic_long_dec_unless_positive(atomic_long_t *v) +{ + return arch_atomic_dec_unless_positive(v); +} + +static __always_inline long +arch_atomic_long_dec_if_positive(atomic_long_t *v) +{ + return arch_atomic_dec_if_positive(v); +} + +#endif /* CONFIG_64BIT */ +#endif /* _LINUX_ATOMIC_LONG_H */ +// e8f0e08ff072b74d180eabe2ad001282b38c2c88 diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index 1d7edad9914f..33207004cfde 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -116,6 +116,7 @@ struct bdi_writeback { struct list_head b_dirty_time; /* time stamps are dirty */ spinlock_t list_lock; /* protects the b_* lists */ + atomic_t writeback_inodes; /* number of inodes under writeback */ struct percpu_counter stat[NR_WB_STAT_ITEMS]; unsigned long congested; /* WB_[a]sync_congested flags */ @@ -142,6 +143,7 @@ struct bdi_writeback { spinlock_t work_lock; /* protects work_list & dwork scheduling */ struct list_head work_list; struct delayed_work dwork; /* work item used for writeback */ + struct delayed_work bw_dwork; /* work item used for bandwidth estimate */ unsigned long dirty_sleep; /* last wait */ diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 44df4fcef65c..ac7f231b8825 100644 --- 
a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -143,7 +143,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) sb = inode->i_sb; #ifdef CONFIG_BLOCK if (sb_is_blkdev_sb(sb)) - return I_BDEV(inode)->bd_bdi; + return I_BDEV(inode)->bd_disk->bdi; #endif return sb->s_bdi; } @@ -288,6 +288,17 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) return inode->i_wb; } +static inline struct bdi_writeback *inode_to_wb_wbc( + struct inode *inode, + struct writeback_control *wbc) +{ + /* + * If wbc does not have inode attached, it means cgroup writeback was + * disabled when wbc started. Just use the default wb in that case. + */ + return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb; +} + /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode @@ -366,6 +377,14 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) return &inode_to_bdi(inode)->wb; } +static inline struct bdi_writeback *inode_to_wb_wbc( + struct inode *inode, + struct writeback_control *wbc) +{ + return inode_to_wb(inode); +} + + static inline struct bdi_writeback * unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { diff --git a/include/linux/bio.h b/include/linux/bio.h index 2203b686e1f0..00952e92eae1 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -5,7 +5,6 @@ #ifndef __LINUX_BIO_H #define __LINUX_BIO_H -#include <linux/highmem.h> #include <linux/mempool.h> #include <linux/ioprio.h> /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ @@ -375,7 +374,7 @@ static inline void bip_set_seed(struct bio_integrity_payload *bip, #endif /* CONFIG_BLK_DEV_INTEGRITY */ -extern void bio_trim(struct bio *bio, int offset, int size); +void bio_trim(struct bio *bio, sector_t offset, sector_t size); extern struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs); @@ -401,6 +400,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors, enum { BIOSET_NEED_BVECS = BIT(0), BIOSET_NEED_RESCUER = BIT(1), + BIOSET_PERCPU_CACHE = BIT(2), }; extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags); extern void bioset_exit(struct bio_set *); @@ -409,6 +409,8 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src); struct bio *bio_alloc_bioset(gfp_t gfp, unsigned short nr_iovecs, struct bio_set *bs); +struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, + struct bio_set *bs); struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs); extern void bio_put(struct bio *); @@ -519,47 +521,6 @@ static inline void bio_clone_blkg_association(struct bio *dst, struct bio *src) { } #endif /* CONFIG_BLK_CGROUP */ -#ifdef CONFIG_HIGHMEM -/* - * remember never ever reenable interrupts between a bvec_kmap_irq and - * bvec_kunmap_irq! 
- */ -static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) -{ - unsigned long addr; - - /* - * might not be a highmem page, but the preempt/irq count - * balancing is a lot nicer this way - */ - local_irq_save(*flags); - addr = (unsigned long) kmap_atomic(bvec->bv_page); - - BUG_ON(addr & ~PAGE_MASK); - - return (char *) addr + bvec->bv_offset; -} - -static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) -{ - unsigned long ptr = (unsigned long) buffer & PAGE_MASK; - - kunmap_atomic((void *) ptr); - local_irq_restore(*flags); -} - -#else -static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) -{ - return page_address(bvec->bv_page) + bvec->bv_offset; -} - -static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) -{ - *flags = 0; -} -#endif - /* * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. * @@ -699,6 +660,11 @@ struct bio_set { struct kmem_cache *bio_slab; unsigned int front_pad; + /* + * per-cpu bio alloc cache + */ + struct bio_alloc_cache __percpu *cache; + mempool_t bio_pool; mempool_t bvec_pool; #if defined(CONFIG_BLK_DEV_INTEGRITY) @@ -715,6 +681,11 @@ struct bio_set { struct bio_list rescue_list; struct work_struct rescue_work; struct workqueue_struct *rescue_workqueue; + + /* + * Hot un-plug notifier for the per-cpu cache, if used + */ + struct hlist_node cpuhp_dead; }; static inline bool bioset_initialized(struct bio_set *bs) diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index a36cfcec4e77..37f36dad18bd 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -227,6 +227,12 @@ unsigned int bitmap_ord_to_pos(const unsigned long *bitmap, unsigned int ord, un int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits); +extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp, + int nmaskbits, loff_t off, size_t count); + +extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp, + int nmaskbits, loff_t off, size_t count); + #define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1))) #define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1))) diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 26bf15e6cd35..5e62e2383b7f 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h @@ -4,6 +4,7 @@ #include <asm/types.h> #include <linux/bits.h> +#include <linux/typecheck.h> #include <uapi/linux/kernel.h> @@ -253,6 +254,55 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, __clear_bit(nr, addr); } +/** + * __ptr_set_bit - Set bit in a pointer's value + * @nr: the bit to set + * @addr: the address of the pointer variable + * + * Example: + * void *p = foo(); + * __ptr_set_bit(bit, &p); + */ +#define __ptr_set_bit(nr, addr) \ + ({ \ + typecheck_pointer(*(addr)); \ + __set_bit(nr, (unsigned long *)(addr)); \ + }) + +/** + * __ptr_clear_bit - Clear bit in a pointer's value + * @nr: the bit to clear + * @addr: the address of the pointer variable + * + * Example: + * void *p = foo(); + * __ptr_clear_bit(bit, &p); + */ +#define __ptr_clear_bit(nr, addr) \ + ({ \ + typecheck_pointer(*(addr)); \ + __clear_bit(nr, (unsigned long *)(addr)); \ + }) + +/** + * __ptr_test_bit - Test bit in a pointer's value + * @nr: the bit to test + * @addr: the address of the pointer variable + * + * Example: + * void *p = foo(); + * if (__ptr_test_bit(bit, &p)) { + * ... + * } else { + * ... 
+ * } + */ +#define __ptr_test_bit(nr, addr) \ + ({ \ + typecheck_pointer(*(addr)); \ + test_bit(nr, (unsigned long *)(addr)); \ + }) + #ifdef __KERNEL__ #ifndef set_mask_bits diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index 37048438872c..b4de2010fba5 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h @@ -152,8 +152,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd); typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd); -typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf, - size_t size); +typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, + struct seq_file *s); struct blkcg_policy { int plid; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 1d18447ebebc..13ba1861e688 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -404,7 +404,13 @@ enum { BLK_MQ_F_STACKING = 1 << 2, BLK_MQ_F_TAG_HCTX_SHARED = 1 << 3, BLK_MQ_F_BLOCKING = 1 << 5, + /* Do not allow an I/O scheduler to be configured. */ BLK_MQ_F_NO_SCHED = 1 << 6, + /* + * Select 'none' during queue registration in case of a single hwq + * or shared hwqs instead of 'mq-deadline'. + */ + BLK_MQ_F_NO_SCHED_BY_DEFAULT = 1 << 7, BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, BLK_MQ_F_ALLOC_POLICY_BITS = 1, @@ -426,18 +432,14 @@ enum { ((policy & ((1 << BLK_MQ_F_ALLOC_POLICY_BITS) - 1)) \ << BLK_MQ_F_ALLOC_POLICY_START_BIT) +struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, void *queuedata, + struct lock_class_key *lkclass); #define blk_mq_alloc_disk(set, queuedata) \ ({ \ static struct lock_class_key __key; \ - struct gendisk *__disk = __blk_mq_alloc_disk(set, queuedata); \ \ - if (!IS_ERR(__disk)) \ - lockdep_init_map(&__disk->lockdep_map, \ - "(bio completion)", &__key, 0); \ - __disk; \ + __blk_mq_alloc_disk(set, queuedata, &__key); \ }) -struct gendisk *__blk_mq_alloc_disk(struct blk_mq_tag_set *set, - void *queuedata); struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *); int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set, struct request_queue *q); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 290f9061b29a..be622b5a21ed 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -34,14 +34,10 @@ struct block_device { void * bd_holder; int bd_holders; bool bd_write_holder; -#ifdef CONFIG_SYSFS - struct list_head bd_holder_disks; -#endif struct kobject *bd_holder_dir; u8 bd_partno; spinlock_t bd_size_lock; /* for bd_inode->i_size updates */ struct gendisk * bd_disk; - struct backing_dev_info *bd_bdi; /* The counter of freeze processes */ int bd_fsfreeze_count; @@ -281,6 +277,7 @@ struct bio { }; #define BIO_RESET_BYTES offsetof(struct bio, bi_max_vecs) +#define BIO_MAX_SECTORS (UINT_MAX >> SECTOR_SHIFT) /* * bio flags @@ -301,6 +298,7 @@ enum { BIO_TRACKED, /* set if bio goes through the rq_qos path */ BIO_REMAPPED, BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */ + BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */ BIO_FLAG_LAST }; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d3afea47ade6..12b9dbcc980e 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -11,14 +11,12 @@ #include <linux/minmax.h> #include <linux/timer.h> #include <linux/workqueue.h> -#include <linux/backing-dev-defs.h> #include <linux/wait.h> #include 
<linux/mempool.h> #include <linux/pfn.h> #include <linux/bio.h> #include <linux/stringify.h> #include <linux/gfp.h> -#include <linux/bsg.h> #include <linux/smp.h> #include <linux/rcupdate.h> #include <linux/percpu-refcount.h> @@ -28,14 +26,11 @@ #include <linux/sbitmap.h> struct module; -struct scsi_ioctl_command; - struct request_queue; struct elevator_queue; struct blk_trace; struct request; struct sg_io_hdr; -struct bsg_job; struct blkcg_gq; struct blk_flush_queue; struct pr_ops; @@ -275,9 +270,6 @@ enum blk_queue_state { #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */ #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */ -#define BLK_SCSI_MAX_CMDS (256) -#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8)) - /* * Zoned block device models (zoned limit). * @@ -398,8 +390,6 @@ struct request_queue { struct blk_mq_hw_ctx **queue_hw_ctx; unsigned int nr_hw_queues; - struct backing_dev_info *backing_dev_info; - /* * The queue owner gets to use this for whatever they like. * ll_rw_blk doesn't touch it. @@ -424,6 +414,8 @@ struct request_queue { spinlock_t queue_lock; + struct gendisk *disk; + /* * queue kobject */ @@ -506,11 +498,6 @@ struct request_queue { unsigned int max_active_zones; #endif /* CONFIG_BLK_DEV_ZONED */ - /* - * sg stuff - */ - unsigned int sg_timeout; - unsigned int sg_reserved_size; int node; struct mutex debugfs_mutex; #ifdef CONFIG_BLK_DEV_IO_TRACE @@ -537,10 +524,6 @@ struct request_queue { int mq_freeze_depth; -#if defined(CONFIG_BLK_DEV_BSG) - struct bsg_class_device bsg_dev; -#endif - #ifdef CONFIG_BLK_DEV_THROTTLING /* Throttle data */ struct throtl_data *td; @@ -664,8 +647,6 @@ extern void blk_clear_pm_only(struct request_queue *q); dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \ (dir), (attrs)) -#define queue_to_disk(q) (dev_to_disk(kobj_to_dev((q)->kobj.parent))) - static inline bool queue_is_mq(struct request_queue *q) { return q->mq_ops; @@ -888,16 +869,6 @@ extern blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq); int blk_rq_append_bio(struct request *rq, struct bio *bio); extern void blk_queue_split(struct bio **); -extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int); -extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t, - unsigned int, void __user *); -extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, - unsigned int, void __user *); -extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, - struct scsi_ioctl_command __user *); -extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp); -extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp); - extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags); extern void blk_queue_exit(struct request_queue *q); extern void blk_sync_queue(struct request_queue *q); @@ -941,6 +912,10 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev) #define SECTOR_SIZE (1 << SECTOR_SHIFT) #endif +#define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) +#define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) +#define SECTOR_MASK (PAGE_SECTORS - 1) + /* * blk_rq_pos() : the current sector * blk_rq_bytes() : bytes left in the entire request @@ -1139,7 +1114,7 @@ void blk_queue_zone_write_granularity(struct request_queue *q, unsigned int size); extern void blk_queue_alignment_offset(struct request_queue *q, unsigned int alignment); -void blk_queue_update_readahead(struct request_queue 
*q); +void disk_update_readahead(struct gendisk *disk); extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); extern void blk_queue_io_min(struct request_queue *q, unsigned int min); extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); @@ -1346,8 +1321,6 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, gfp_mask, 0); } -extern int blk_verify_command(unsigned char *cmd, fmode_t mode); - static inline bool bdev_is_partition(struct block_device *bdev) { return bdev->bd_partno; @@ -1376,6 +1349,11 @@ static inline unsigned int queue_max_sectors(const struct request_queue *q) return q->limits.max_sectors; } +static inline unsigned int queue_max_bytes(struct request_queue *q) +{ + return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9; +} + static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) { return q->limits.max_hw_sectors; @@ -1521,6 +1499,22 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector return offset << SECTOR_SHIFT; } +/* + * Two cases of handling DISCARD merge: + * If max_discard_segments > 1, the driver takes every bio + * as a range and send them to controller together. The ranges + * needn't to be contiguous. + * Otherwise, the bios/requests will be handled as same as + * others which should be contiguous. + */ +static inline bool blk_discard_mergable(struct request *req) +{ + if (req_op(req) == REQ_OP_DISCARD && + queue_max_discard_segments(req->q) > 1) + return true; + return false; +} + static inline int bdev_discard_alignment(struct block_device *bdev) { struct request_queue *q = bdev_get_queue(bdev); @@ -1855,6 +1849,13 @@ struct block_device_operations { char *(*devnode)(struct gendisk *disk, umode_t *mode); struct module *owner; const struct pr_ops *pr_ops; + + /* + * Special callback for probing GPT entry at a given sector. + * Needed by Android devices, used by GPT scanner and MMC blk + * driver. 
+ */ + int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector); }; #ifdef CONFIG_COMPAT @@ -1984,8 +1985,6 @@ void blkdev_put_no_open(struct block_device *bdev); struct block_device *bdev_alloc(struct gendisk *disk, u8 partno); void bdev_add(struct block_device *bdev, dev_t dev); struct block_device *I_BDEV(struct inode *inode); -struct block_device *bdgrab(struct block_device *bdev); -void bdput(struct block_device *); int truncate_bdev_range(struct block_device *bdev, fmode_t mode, loff_t lstart, loff_t lend); diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h index abe089c27529..537e1b991f11 100644 --- a/include/linux/bootconfig.h +++ b/include/linux/bootconfig.h @@ -110,7 +110,7 @@ static inline __init bool xbc_node_is_leaf(struct xbc_node *node) } /* Tree-based key-value access APIs */ -struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent, +struct xbc_node * __init xbc_node_find_subkey(struct xbc_node *parent, const char *key); const char * __init xbc_node_find_value(struct xbc_node *parent, @@ -148,7 +148,7 @@ xbc_find_value(const char *key, struct xbc_node **vnode) */ static inline struct xbc_node * __init xbc_find_node(const char *key) { - return xbc_node_find_child(NULL, key); + return xbc_node_find_subkey(NULL, key); } /** diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 8b77d08d4b47..2746fd804216 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -23,22 +23,73 @@ struct ctl_table_header; struct task_struct; #ifdef CONFIG_CGROUP_BPF +enum cgroup_bpf_attach_type { + CGROUP_BPF_ATTACH_TYPE_INVALID = -1, + CGROUP_INET_INGRESS = 0, + CGROUP_INET_EGRESS, + CGROUP_INET_SOCK_CREATE, + CGROUP_SOCK_OPS, + CGROUP_DEVICE, + CGROUP_INET4_BIND, + CGROUP_INET6_BIND, + CGROUP_INET4_CONNECT, + CGROUP_INET6_CONNECT, + CGROUP_INET4_POST_BIND, + CGROUP_INET6_POST_BIND, + CGROUP_UDP4_SENDMSG, + CGROUP_UDP6_SENDMSG, + CGROUP_SYSCTL, + CGROUP_UDP4_RECVMSG, + CGROUP_UDP6_RECVMSG, + CGROUP_GETSOCKOPT, + CGROUP_SETSOCKOPT, + CGROUP_INET4_GETPEERNAME, + CGROUP_INET6_GETPEERNAME, + CGROUP_INET4_GETSOCKNAME, + CGROUP_INET6_GETSOCKNAME, + CGROUP_INET_SOCK_RELEASE, + MAX_CGROUP_BPF_ATTACH_TYPE +}; -extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE]; -#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type]) +#define CGROUP_ATYPE(type) \ + case BPF_##type: return type -#define BPF_CGROUP_STORAGE_NEST_MAX 8 +static inline enum cgroup_bpf_attach_type +to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type) +{ + switch (attach_type) { + CGROUP_ATYPE(CGROUP_INET_INGRESS); + CGROUP_ATYPE(CGROUP_INET_EGRESS); + CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE); + CGROUP_ATYPE(CGROUP_SOCK_OPS); + CGROUP_ATYPE(CGROUP_DEVICE); + CGROUP_ATYPE(CGROUP_INET4_BIND); + CGROUP_ATYPE(CGROUP_INET6_BIND); + CGROUP_ATYPE(CGROUP_INET4_CONNECT); + CGROUP_ATYPE(CGROUP_INET6_CONNECT); + CGROUP_ATYPE(CGROUP_INET4_POST_BIND); + CGROUP_ATYPE(CGROUP_INET6_POST_BIND); + CGROUP_ATYPE(CGROUP_UDP4_SENDMSG); + CGROUP_ATYPE(CGROUP_UDP6_SENDMSG); + CGROUP_ATYPE(CGROUP_SYSCTL); + CGROUP_ATYPE(CGROUP_UDP4_RECVMSG); + CGROUP_ATYPE(CGROUP_UDP6_RECVMSG); + CGROUP_ATYPE(CGROUP_GETSOCKOPT); + CGROUP_ATYPE(CGROUP_SETSOCKOPT); + CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME); + CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME); + CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME); + CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME); + CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE); + default: + return CGROUP_BPF_ATTACH_TYPE_INVALID; + } +} -struct 
bpf_cgroup_storage_info { - struct task_struct *task; - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; -}; +#undef CGROUP_ATYPE -/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks - * to use bpf cgroup storage simultaneously. - */ -DECLARE_PER_CPU(struct bpf_cgroup_storage_info, - bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]); +extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE]; +#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype]) #define for_each_cgroup_storage_type(stype) \ for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) @@ -80,15 +131,15 @@ struct bpf_prog_array; struct cgroup_bpf { /* array of effective progs in this cgroup */ - struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE]; + struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE]; /* attached progs to this cgroup and attach flags * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will * have either zero or one element * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS */ - struct list_head progs[MAX_BPF_ATTACH_TYPE]; - u32 flags[MAX_BPF_ATTACH_TYPE]; + struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE]; + u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE]; /* list of cgroup shared storages */ struct list_head storages; @@ -128,28 +179,28 @@ int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, int __cgroup_bpf_run_filter_skb(struct sock *sk, struct sk_buff *skb, - enum bpf_attach_type type); + enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_sk(struct sock *sk, - enum bpf_attach_type type); + enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, struct sockaddr *uaddr, - enum bpf_attach_type type, + enum cgroup_bpf_attach_type atype, void *t_ctx, u32 *flags); int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, struct bpf_sock_ops_kern *sock_ops, - enum bpf_attach_type type); + enum cgroup_bpf_attach_type atype); int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, - short access, enum bpf_attach_type type); + short access, enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, struct ctl_table *table, int write, char **buf, size_t *pcount, loff_t *ppos, - enum bpf_attach_type type); + enum cgroup_bpf_attach_type atype); int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, int *optname, char __user *optval, @@ -172,44 +223,6 @@ static inline enum bpf_cgroup_storage_type cgroup_storage_type( return BPF_CGROUP_STORAGE_SHARED; } -static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage - *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) -{ - enum bpf_cgroup_storage_type stype; - int i, err = 0; - - preempt_disable(); - for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) { - if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL)) - continue; - - this_cpu_write(bpf_cgroup_storage_info[i].task, current); - for_each_cgroup_storage_type(stype) - this_cpu_write(bpf_cgroup_storage_info[i].storage[stype], - storage[stype]); - goto out; - } - err = -EBUSY; - WARN_ON_ONCE(1); - -out: - preempt_enable(); - return err; -} - -static inline void bpf_cgroup_storage_unset(void) -{ - int i; - - for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) { - if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current)) - continue; - - this_cpu_write(bpf_cgroup_storage_info[i].task, NULL); - return; - } -} - struct 
bpf_cgroup_storage * cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, void *key, bool locked); @@ -230,9 +243,9 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS)) \ + if (cgroup_bpf_enabled(CGROUP_INET_INGRESS)) \ __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ - BPF_CGROUP_INET_INGRESS); \ + CGROUP_INET_INGRESS); \ \ __ret; \ }) @@ -240,54 +253,54 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ + if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ typeof(sk) __sk = sk_to_full_sk(sk); \ if (sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ - BPF_CGROUP_INET_EGRESS); \ + CGROUP_INET_EGRESS); \ } \ __ret; \ }) -#define BPF_CGROUP_RUN_SK_PROG(sk, type) \ +#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) { \ - __ret = __cgroup_bpf_run_filter_sk(sk, type); \ + if (cgroup_bpf_enabled(atype)) { \ + __ret = __cgroup_bpf_run_filter_sk(sk, atype); \ } \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE) #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ - BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND) + BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND) -#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \ +#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \ ({ \ u32 __unused_flags; \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + if (cgroup_bpf_enabled(atype)) \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ NULL, \ &__unused_flags); \ __ret; \ }) -#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \ +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \ ({ \ u32 __unused_flags; \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) { \ + if (cgroup_bpf_enabled(atype)) { \ lock_sock(sk); \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ t_ctx, \ &__unused_flags); \ release_sock(sk); \ @@ -300,13 +313,13 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE). 
*/ -#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags) \ +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \ ({ \ u32 __flags = 0; \ int __ret = 0; \ - if (cgroup_bpf_enabled(type)) { \ + if (cgroup_bpf_enabled(atype)) { \ lock_sock(sk); \ - __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \ + __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ NULL, &__flags); \ release_sock(sk); \ if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \ @@ -316,33 +329,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \ - ((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) || \ - cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) && \ + ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \ + cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \ (sk)->sk_prot->pre_connect) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT) + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT) + BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL) #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL) #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx) #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx) #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL) #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ - BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL) + BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL) /* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a * fullsock and its parent fullsock cannot be traced by @@ -362,33 +375,33 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS)) \ + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \ __ret = __cgroup_bpf_run_filter_sock_ops(sk, \ sock_ops, \ - BPF_CGROUP_SOCK_OPS); \ + CGROUP_SOCK_OPS); \ __ret; \ }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) { \ + if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \ typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ if (__sk && sk_fullsock(__sk)) \ __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ sock_ops, \ - BPF_CGROUP_SOCK_OPS); \ + CGROUP_SOCK_OPS); \ } \ __ret; \ }) -#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \ +#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \ ({ \ int __ret = 0; \ - if 
(cgroup_bpf_enabled(BPF_CGROUP_DEVICE)) \ - __ret = __cgroup_bpf_check_dev_permission(type, major, minor, \ + if (cgroup_bpf_enabled(CGROUP_DEVICE)) \ + __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \ access, \ - BPF_CGROUP_DEVICE); \ + CGROUP_DEVICE); \ \ __ret; \ }) @@ -397,10 +410,10 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL)) \ + if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \ __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ buf, count, pos, \ - BPF_CGROUP_SYSCTL); \ + CGROUP_SYSCTL); \ __ret; \ }) @@ -408,7 +421,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, kernel_optval) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT)) \ __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ optname, optval, \ optlen, \ @@ -419,7 +432,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ ({ \ int __ret = 0; \ - if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ get_user(__ret, optlen); \ __ret; \ }) @@ -428,7 +441,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, max_optlen, retval) \ ({ \ int __ret = retval; \ - if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ if (!(sock)->sk_prot->bpf_bypass_getsockopt || \ !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \ tcp_bpf_bypass_getsockopt, \ @@ -443,7 +456,7 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, optlen, retval) \ ({ \ int __ret = retval; \ - if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT)) \ + if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ __ret = __cgroup_bpf_run_filter_getsockopt_kern( \ sock, level, optname, optval, optlen, retval); \ __ret; \ @@ -487,9 +500,6 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr, return -EINVAL; } -static inline int bpf_cgroup_storage_set( - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; } -static inline void bpf_cgroup_storage_unset(void) {} static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map) { return 0; } static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( @@ -505,14 +515,14 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, return 0; } -#define cgroup_bpf_enabled(type) (0) -#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; }) +#define cgroup_bpf_enabled(atype) (0) +#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; }) #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0) #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; }) #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; }) @@ -524,7 +534,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, 
uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) -#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) +#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; }) #define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; }) #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; }) #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e8e2b0393ca9..f4c16f19f83e 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -168,6 +168,7 @@ struct bpf_map { u32 max_entries; u32 map_flags; int spin_lock_off; /* >=0 valid offset, <0 error */ + int timer_off; /* >=0 valid offset, <0 error */ u32 id; int numa_node; u32 btf_key_type_id; @@ -197,30 +198,53 @@ static inline bool map_value_has_spin_lock(const struct bpf_map *map) return map->spin_lock_off >= 0; } -static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) +static inline bool map_value_has_timer(const struct bpf_map *map) { - if (likely(!map_value_has_spin_lock(map))) - return; - *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = - (struct bpf_spin_lock){}; + return map->timer_off >= 0; } -/* copy everything but bpf_spin_lock */ +static inline void check_and_init_map_value(struct bpf_map *map, void *dst) +{ + if (unlikely(map_value_has_spin_lock(map))) + *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = + (struct bpf_spin_lock){}; + if (unlikely(map_value_has_timer(map))) + *(struct bpf_timer *)(dst + map->timer_off) = + (struct bpf_timer){}; +} + +/* copy everything but bpf_spin_lock and bpf_timer. There could be one of each. */ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) { + u32 s_off = 0, s_sz = 0, t_off = 0, t_sz = 0; + if (unlikely(map_value_has_spin_lock(map))) { - u32 off = map->spin_lock_off; + s_off = map->spin_lock_off; + s_sz = sizeof(struct bpf_spin_lock); + } else if (unlikely(map_value_has_timer(map))) { + t_off = map->timer_off; + t_sz = sizeof(struct bpf_timer); + } - memcpy(dst, src, off); - memcpy(dst + off + sizeof(struct bpf_spin_lock), - src + off + sizeof(struct bpf_spin_lock), - map->value_size - off - sizeof(struct bpf_spin_lock)); + if (unlikely(s_sz || t_sz)) { + if (s_off < t_off || !s_sz) { + swap(s_off, t_off); + swap(s_sz, t_sz); + } + memcpy(dst, src, t_off); + memcpy(dst + t_off + t_sz, + src + t_off + t_sz, + s_off - t_off - t_sz); + memcpy(dst + s_off + s_sz, + src + s_off + s_sz, + map->value_size - s_off - s_sz); } else { memcpy(dst, src, map->value_size); } } void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, bool lock_src); +void bpf_timer_cancel_and_free(void *timer); int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); struct bpf_offload_dev; @@ -314,6 +338,7 @@ enum bpf_arg_type { ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ ARG_PTR_TO_STACK_OR_NULL, /* pointer to stack or NULL */ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ + ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ __BPF_ARG_TYPE_MAX, }; @@ -554,6 +579,11 @@ struct btf_func_model { */ #define BPF_TRAMP_F_SKIP_FRAME BIT(2) +/* Store IP address of the caller on the trampoline stack, + * so it's available for trampoline's programs. + */ +#define BPF_TRAMP_F_IP_ARG BIT(3) + /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 * bytes on x86. 
Pick a number to fit into BPF_IMAGE_SIZE / 2 */ @@ -1073,7 +1103,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, /* an array of programs to be executed under rcu_lock. * * Typical usage: - * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, BPF_PROG_RUN); + * ret = BPF_PROG_RUN_ARRAY(&bpf_prog_array, ctx, bpf_prog_run); * * the structure returned by bpf_prog_array_alloc() should be populated * with program pointers and the last pointer must be NULL. @@ -1084,7 +1114,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, */ struct bpf_prog_array_item { struct bpf_prog *prog; - struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; + union { + struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; + u64 bpf_cookie; + }; }; struct bpf_prog_array { @@ -1110,73 +1143,133 @@ int bpf_prog_array_copy_info(struct bpf_prog_array *array, int bpf_prog_array_copy(struct bpf_prog_array *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, + u64 bpf_cookie, struct bpf_prog_array **new_array); +struct bpf_run_ctx {}; + +struct bpf_cg_run_ctx { + struct bpf_run_ctx run_ctx; + const struct bpf_prog_array_item *prog_item; +}; + +struct bpf_trace_run_ctx { + struct bpf_run_ctx run_ctx; + u64 bpf_cookie; +}; + +static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx) +{ + struct bpf_run_ctx *old_ctx = NULL; + +#ifdef CONFIG_BPF_SYSCALL + old_ctx = current->bpf_ctx; + current->bpf_ctx = new_ctx; +#endif + return old_ctx; +} + +static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) +{ +#ifdef CONFIG_BPF_SYSCALL + current->bpf_ctx = old_ctx; +#endif +} + /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */ #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0) /* BPF program asks to set CN on the packet. */ #define BPF_RET_SET_CN (1 << 0) -/* For BPF_PROG_RUN_ARRAY_FLAGS and __BPF_PROG_RUN_ARRAY, - * if bpf_cgroup_storage_set() failed, the rest of programs - * will not execute. This should be a really rare scenario - * as it requires BPF_CGROUP_STORAGE_NEST_MAX number of - * preemptions all between bpf_cgroup_storage_set() and - * bpf_cgroup_storage_unset() on the same cpu. 
- */ -#define BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, ret_flags) \ - ({ \ - struct bpf_prog_array_item *_item; \ - struct bpf_prog *_prog; \ - struct bpf_prog_array *_array; \ - u32 _ret = 1; \ - u32 func_ret; \ - migrate_disable(); \ - rcu_read_lock(); \ - _array = rcu_dereference(array); \ - _item = &_array->items[0]; \ - while ((_prog = READ_ONCE(_item->prog))) { \ - if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \ - break; \ - func_ret = func(_prog, ctx); \ - _ret &= (func_ret & 1); \ - *(ret_flags) |= (func_ret >> 1); \ - bpf_cgroup_storage_unset(); \ - _item++; \ - } \ - rcu_read_unlock(); \ - migrate_enable(); \ - _ret; \ - }) - -#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null, set_cg_storage) \ - ({ \ - struct bpf_prog_array_item *_item; \ - struct bpf_prog *_prog; \ - struct bpf_prog_array *_array; \ - u32 _ret = 1; \ - migrate_disable(); \ - rcu_read_lock(); \ - _array = rcu_dereference(array); \ - if (unlikely(check_non_null && !_array))\ - goto _out; \ - _item = &_array->items[0]; \ - while ((_prog = READ_ONCE(_item->prog))) { \ - if (!set_cg_storage) { \ - _ret &= func(_prog, ctx); \ - } else { \ - if (unlikely(bpf_cgroup_storage_set(_item->cgroup_storage))) \ - break; \ - _ret &= func(_prog, ctx); \ - bpf_cgroup_storage_unset(); \ - } \ - _item++; \ - } \ -_out: \ - rcu_read_unlock(); \ - migrate_enable(); \ - _ret; \ - }) +typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); + +static __always_inline u32 +BPF_PROG_RUN_ARRAY_CG_FLAGS(const struct bpf_prog_array __rcu *array_rcu, + const void *ctx, bpf_prog_run_fn run_prog, + u32 *ret_flags) +{ + const struct bpf_prog_array_item *item; + const struct bpf_prog *prog; + const struct bpf_prog_array *array; + struct bpf_run_ctx *old_run_ctx; + struct bpf_cg_run_ctx run_ctx; + u32 ret = 1; + u32 func_ret; + + migrate_disable(); + rcu_read_lock(); + array = rcu_dereference(array_rcu); + item = &array->items[0]; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + while ((prog = READ_ONCE(item->prog))) { + run_ctx.prog_item = item; + func_ret = run_prog(prog, ctx); + ret &= (func_ret & 1); + *(ret_flags) |= (func_ret >> 1); + item++; + } + bpf_reset_run_ctx(old_run_ctx); + rcu_read_unlock(); + migrate_enable(); + return ret; +} + +static __always_inline u32 +BPF_PROG_RUN_ARRAY_CG(const struct bpf_prog_array __rcu *array_rcu, + const void *ctx, bpf_prog_run_fn run_prog) +{ + const struct bpf_prog_array_item *item; + const struct bpf_prog *prog; + const struct bpf_prog_array *array; + struct bpf_run_ctx *old_run_ctx; + struct bpf_cg_run_ctx run_ctx; + u32 ret = 1; + + migrate_disable(); + rcu_read_lock(); + array = rcu_dereference(array_rcu); + item = &array->items[0]; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + while ((prog = READ_ONCE(item->prog))) { + run_ctx.prog_item = item; + ret &= run_prog(prog, ctx); + item++; + } + bpf_reset_run_ctx(old_run_ctx); + rcu_read_unlock(); + migrate_enable(); + return ret; +} + +static __always_inline u32 +BPF_PROG_RUN_ARRAY(const struct bpf_prog_array __rcu *array_rcu, + const void *ctx, bpf_prog_run_fn run_prog) +{ + const struct bpf_prog_array_item *item; + const struct bpf_prog *prog; + const struct bpf_prog_array *array; + struct bpf_run_ctx *old_run_ctx; + struct bpf_trace_run_ctx run_ctx; + u32 ret = 1; + + migrate_disable(); + rcu_read_lock(); + array = rcu_dereference(array_rcu); + if (unlikely(!array)) + goto out; + old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); + item = &array->items[0]; + while ((prog = 
READ_ONCE(item->prog))) { + run_ctx.bpf_cookie = item->bpf_cookie; + ret &= run_prog(prog, ctx); + item++; + } + bpf_reset_run_ctx(old_run_ctx); +out: + rcu_read_unlock(); + migrate_enable(); + return ret; +} /* To be used by __cgroup_bpf_run_filter_skb for EGRESS BPF progs * so BPF programs can request cwr for TCP packets. @@ -1205,7 +1298,7 @@ _out: \ u32 _flags = 0; \ bool _cn; \ u32 _ret; \ - _ret = BPF_PROG_RUN_ARRAY_FLAGS(array, ctx, func, &_flags); \ + _ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(array, ctx, func, &_flags); \ _cn = _flags & BPF_RET_SET_CN; \ if (_ret) \ _ret = (_cn ? NET_XMIT_CN : NET_XMIT_SUCCESS); \ @@ -1214,12 +1307,6 @@ _out: \ _ret; \ }) -#define BPF_PROG_RUN_ARRAY(array, ctx, func) \ - __BPF_PROG_RUN_ARRAY(array, ctx, func, false, true) - -#define BPF_PROG_RUN_ARRAY_CHECK(array, ctx, func) \ - __BPF_PROG_RUN_ARRAY(array, ctx, func, true, false) - #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); extern struct mutex bpf_stats_enabled_mutex; @@ -1398,6 +1485,9 @@ typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, struct seq_file *seq); typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, struct bpf_link_info *info); +typedef const struct bpf_func_proto * +(*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, + const struct bpf_prog *prog); enum bpf_iter_feature { BPF_ITER_RESCHED = BIT(0), @@ -1410,6 +1500,7 @@ struct bpf_iter_reg { bpf_iter_detach_target_t detach_target; bpf_iter_show_fdinfo_t show_fdinfo; bpf_iter_fill_link_info_t fill_link_info; + bpf_iter_get_func_proto_t get_func_proto; u32 ctx_arg_info_size; u32 feature; struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; @@ -1432,6 +1523,8 @@ struct bpf_iter__bpf_map_elem { int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); bool bpf_iter_prog_supported(struct bpf_prog *prog); +const struct bpf_func_proto * +bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); int bpf_iter_new_fd(struct bpf_link *link); bool bpf_link_is_iter(struct bpf_link *link); @@ -1509,12 +1602,12 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, struct bpf_prog *xdp_prog, struct bpf_map *map, bool exclude_ingress); -bool dev_map_can_have_prog(struct bpf_map *map); void __cpu_map_flush(void); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp, struct net_device *dev_rx); -bool cpu_map_prog_allowed(struct bpf_map *map); +int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, + struct sk_buff *skb); /* Return map's numa specified by userspace */ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) @@ -1711,6 +1804,12 @@ static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, return 0; } +static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, + struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} + static inline bool cpu_map_prog_allowed(struct bpf_map *map) { return false; @@ -1852,6 +1951,12 @@ void bpf_map_offload_map_free(struct bpf_map *map); int bpf_prog_test_run_syscall(struct bpf_prog *prog, const union bpf_attr *kattr, union bpf_attr __user *uattr); + +int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); +int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type 
ptype); +int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); +void sock_map_unhash(struct sock *sk); +void sock_map_close(struct sock *sk, long timeout); #else static inline int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr) @@ -1884,24 +1989,6 @@ static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, { return -ENOTSUPP; } -#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ - -#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) -int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); -int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); -int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); -void sock_map_unhash(struct sock *sk); -void sock_map_close(struct sock *sk, long timeout); - -void bpf_sk_reuseport_detach(struct sock *sk); -int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, - void *value); -int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, - void *value, u64 map_flags); -#else -static inline void bpf_sk_reuseport_detach(struct sock *sk) -{ -} #ifdef CONFIG_BPF_SYSCALL static inline int sock_map_get_from_fd(const union bpf_attr *attr, @@ -1921,7 +2008,21 @@ static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void { return -EOPNOTSUPP; } +#endif /* CONFIG_BPF_SYSCALL */ +#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ +#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) +void bpf_sk_reuseport_detach(struct sock *sk); +int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, + void *value); +int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, + void *value, u64 map_flags); +#else +static inline void bpf_sk_reuseport_detach(struct sock *sk) +{ +} + +#ifdef CONFIG_BPF_SYSCALL static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, void *value) { @@ -1998,9 +2099,8 @@ extern const struct bpf_func_proto bpf_task_storage_get_proto; extern const struct bpf_func_proto bpf_task_storage_delete_proto; extern const struct bpf_func_proto bpf_for_each_map_elem_proto; extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; - -const struct bpf_func_proto *bpf_tracing_func_proto( - enum bpf_func_id func_id, const struct bpf_prog *prog); +extern const struct bpf_func_proto bpf_sk_setsockopt_proto; +extern const struct bpf_func_proto bpf_sk_getsockopt_proto; const struct bpf_func_proto *tracing_prog_func_proto( enum bpf_func_id func_id, const struct bpf_prog *prog); diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index a9db1eae6796..9c81724e4b98 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -134,4 +134,8 @@ BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup) BPF_LINK_TYPE(BPF_LINK_TYPE_ITER, iter) #ifdef CONFIG_NET BPF_LINK_TYPE(BPF_LINK_TYPE_NETNS, netns) +BPF_LINK_TYPE(BPF_LINK_TYPE_XDP, xdp) +#endif +#ifdef CONFIG_PERF_EVENTS +BPF_LINK_TYPE(BPF_LINK_TYPE_PERF_EVENT, perf) #endif diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e774ecc1cd1f..5424124dbe36 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -53,7 +53,14 @@ struct bpf_reg_state { /* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE | * PTR_TO_MAP_VALUE_OR_NULL */ - struct bpf_map *map_ptr; + struct { + struct bpf_map *map_ptr; + /* To distinguish map lookups from outer map + * the map_uid is non-zero for registers + * pointing to inner maps. 
+ */ + u32 map_uid; + }; /* for PTR_TO_BTF_ID */ struct { @@ -201,12 +208,19 @@ struct bpf_func_state { * zero == main subprog */ u32 subprogno; + /* Every bpf_timer_start will increment async_entry_cnt. + * It's used to distinguish: + * void foo(void) { for(;;); } + * void foo(void) { bpf_timer_set_callback(,foo); } + */ + u32 async_entry_cnt; + bool in_callback_fn; + bool in_async_callback_fn; /* The following fields should be last. See copy_func_state() */ int acquired_refs; struct bpf_reference_state *refs; int allocated_stack; - bool in_callback_fn; struct bpf_stack_state *stack; }; @@ -340,8 +354,8 @@ struct bpf_insn_aux_data { }; u64 map_key_state; /* constant (32 bit) key tracking for maps */ int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ - int sanitize_stack_off; /* stack slot to be cleared */ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */ + bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */ bool zext_dst; /* this insn zero extends dst reg */ u8 alu_state; /* used in combination with alu_limit */ @@ -392,6 +406,7 @@ struct bpf_subprog_info { bool has_tail_call; bool tail_call_reachable; bool has_ld_abs; + bool is_async_cb; }; /* single container for all structs @@ -414,6 +429,7 @@ struct bpf_verifier_env { u32 used_map_cnt; /* number of used maps */ u32 used_btf_cnt; /* number of used BTF objects */ u32 id_gen; /* used to generate unique reg IDs */ + bool explore_alu_limits; bool allow_ptr_leaks; bool allow_uninit_stack; bool allow_ptr_to_map_access; diff --git a/include/linux/bpfptr.h b/include/linux/bpfptr.h index 5cdeab497cb3..546e27fc6d46 100644 --- a/include/linux/bpfptr.h +++ b/include/linux/bpfptr.h @@ -62,9 +62,17 @@ static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset, return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size); } -static inline void *memdup_bpfptr(bpfptr_t src, size_t len) +static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len) { - return memdup_sockptr((sockptr_t) src, len); + void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN); + + if (!p) + return ERR_PTR(-ENOMEM); + if (copy_from_bpfptr(p, src, len)) { + kvfree(p); + return ERR_PTR(-EFAULT); + } + return p; } static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count) diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index 960988d42f77..6b211323a489 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h @@ -12,6 +12,7 @@ #include <linux/blkdev.h> #include <scsi/scsi_request.h> +struct bsg_job; struct request; struct device; struct scatterlist; diff --git a/include/linux/bsg.h b/include/linux/bsg.h index dac37b6e00ec..1ac81c809da9 100644 --- a/include/linux/bsg.h +++ b/include/linux/bsg.h @@ -4,36 +4,16 @@ #include <uapi/linux/bsg.h> -struct request; +struct bsg_device; +struct device; +struct request_queue; -#ifdef CONFIG_BLK_DEV_BSG -struct bsg_ops { - int (*check_proto)(struct sg_io_v4 *hdr); - int (*fill_hdr)(struct request *rq, struct sg_io_v4 *hdr, - fmode_t mode); - int (*complete_rq)(struct request *rq, struct sg_io_v4 *hdr); - void (*free_rq)(struct request *rq); -}; +typedef int (bsg_sg_io_fn)(struct request_queue *, struct sg_io_v4 *hdr, + fmode_t mode, unsigned int timeout); -struct bsg_class_device { - struct device *class_dev; - int minor; - struct request_queue *queue; - const struct bsg_ops *ops; -}; +struct bsg_device *bsg_register_queue(struct request_queue *q, + struct device *parent, const char *name, + bsg_sg_io_fn *sg_io_fn); +void 
bsg_unregister_queue(struct bsg_device *bcd); -int bsg_register_queue(struct request_queue *q, struct device *parent, - const char *name, const struct bsg_ops *ops); -int bsg_scsi_register_queue(struct request_queue *q, struct device *parent); -void bsg_unregister_queue(struct request_queue *q); -#else -static inline int bsg_scsi_register_queue(struct request_queue *q, - struct device *parent) -{ - return 0; -} -static inline void bsg_unregister_queue(struct request_queue *q) -{ -} -#endif /* CONFIG_BLK_DEV_BSG */ #endif /* _LINUX_BSG_H */ diff --git a/include/linux/btf.h b/include/linux/btf.h index 94a0c976c90f..214fde93214b 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -99,6 +99,7 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); +int btf_find_timer(const struct btf *btf, const struct btf_type *t); bool btf_type_is_void(const struct btf_type *t); s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind); const struct btf_type *btf_type_skip_modifiers(const struct btf *btf, diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h index 57890b357f85..47d9abfbdb55 100644 --- a/include/linux/btf_ids.h +++ b/include/linux/btf_ids.h @@ -82,6 +82,9 @@ __BTF_ID_LIST(name, globl) #define BTF_ID_LIST_SINGLE(name, prefix, typename) \ BTF_ID_LIST(name) \ BTF_ID(prefix, typename) +#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \ + BTF_ID_LIST_GLOBAL(name) \ + BTF_ID(prefix, typename) /* * The BTF_ID_UNUSED macro defines 4 zero bytes. @@ -148,6 +151,7 @@ extern struct btf_id_set name; #define BTF_ID_UNUSED #define BTF_ID_LIST_GLOBAL(name) u32 name[1]; #define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1]; +#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1]; #define BTF_SET_START(name) static struct btf_id_set name = { 0 }; #define BTF_SET_START_GLOBAL(name) static struct btf_id_set name = { 0 }; #define BTF_SET_END(name) @@ -172,7 +176,8 @@ extern struct btf_id_set name; BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP_TW, tcp_timewait_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_TCP6, tcp6_sock) \ BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP, udp_sock) \ - BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UDP6, udp6_sock) \ + BTF_SOCK_TYPE(BTF_SOCK_TYPE_UNIX, unix_sock) enum { #define BTF_SOCK_TYPE(name, str) name, @@ -184,4 +189,6 @@ MAX_BTF_SOCK_TYPE, extern u32 btf_sock_ids[]; #endif +extern u32 btf_task_struct_ids[]; + #endif diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index e7e99da31349..6486d3c19463 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -409,7 +409,7 @@ static inline void invalidate_inode_buffers(struct inode *inode) {} static inline int remove_inode_buffers(struct inode *inode) { return 1; } static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; } static inline void invalidate_bh_lrus_cpu(int cpu) {} -static inline bool has_bh_in_lru(int cpu, void *dummy) { return 0; } +static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; } #define buffer_heads_over_limit 0 #endif /* CONFIG_BLOCK */ diff --git a/include/linux/bvec.h b/include/linux/bvec.h index ff832e698efb..0e9bdd42dafb 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h @@ -4,9 +4,10 @@ * * Copyright (C) 2001 Ming Lei <ming.lei@canonical.com> */ -#ifndef __LINUX_BVEC_ITER_H -#define 
__LINUX_BVEC_ITER_H +#ifndef __LINUX_BVEC_H +#define __LINUX_BVEC_H +#include <linux/highmem.h> #include <linux/bug.h> #include <linux/errno.h> #include <linux/limits.h> @@ -183,4 +184,61 @@ static inline void bvec_advance(const struct bio_vec *bvec, } } -#endif /* __LINUX_BVEC_ITER_H */ +/** + * bvec_kmap_local - map a bvec into the kernel virtual address space + * @bvec: bvec to map + * + * Must be called on single-page bvecs only. Call kunmap_local on the returned + * address to unmap. + */ +static inline void *bvec_kmap_local(struct bio_vec *bvec) +{ + return kmap_local_page(bvec->bv_page) + bvec->bv_offset; +} + +/** + * memcpy_from_bvec - copy data from a bvec + * @bvec: bvec to copy from + * + * Must be called on single-page bvecs only. + */ +static inline void memcpy_from_bvec(char *to, struct bio_vec *bvec) +{ + memcpy_from_page(to, bvec->bv_page, bvec->bv_offset, bvec->bv_len); +} + +/** + * memcpy_to_bvec - copy data to a bvec + * @bvec: bvec to copy to + * + * Must be called on single-page bvecs only. + */ +static inline void memcpy_to_bvec(struct bio_vec *bvec, const char *from) +{ + memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, bvec->bv_len); +} + +/** + * memzero_bvec - zero all data in a bvec + * @bvec: bvec to zero + * + * Must be called on single-page bvecs only. + */ +static inline void memzero_bvec(struct bio_vec *bvec) +{ + memzero_page(bvec->bv_page, bvec->bv_offset, bvec->bv_len); +} + +/** + * bvec_virt - return the virtual address for a bvec + * @bvec: bvec to return the virtual address for + * + * Note: the caller must ensure that @bvec->bv_page is not a highmem page. + */ +static inline void *bvec_virt(struct bio_vec *bvec) +{ + WARN_ON_ONCE(PageHighMem(bvec->bv_page)); + return page_address(bvec->bv_page) + bvec->bv_offset; +} + +#endif /* __LINUX_BVEC_H */ diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index 4f72b47973c3..2f909ed084c6 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -79,24 +79,6 @@ struct cpu_cacheinfo { bool cpu_map_populated; }; -/* - * Helpers to make sure "func" is executed on the cpu whose cache - * attributes are being detected - */ -#define DEFINE_SMP_CALL_CACHE_FUNCTION(func) \ -static inline void _##func(void *ret) \ -{ \ - int cpu = smp_processor_id(); \ - *(int *)ret = __##func(cpu); \ -} \ - \ -int func(unsigned int cpu) \ -{ \ - int ret; \ - smp_call_function_single(cpu, _##func, &ret, true); \ - return ret; \ -} - struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu); int init_cache_level(unsigned int cpu); int populate_cache_leaves(unsigned int cpu); diff --git a/include/linux/can/bittiming.h b/include/linux/can/bittiming.h index ae7a3411167c..9de6e9053e34 100644 --- a/include/linux/can/bittiming.h +++ b/include/linux/can/bittiming.h @@ -37,7 +37,7 @@ * quanta, from when the bit is sent on the TX pin to when it is * received on the RX pin of the transmitter. Possible options: * - * O: automatic mode. The controller dynamically measure @tdcv + * 0: automatic mode. The controller dynamically measures @tdcv * for each transmitted CAN FD frame. * * Other values: manual mode. Use the fixed provided value. @@ -45,7 +45,7 @@ * @tdco: Transmitter Delay Compensation Offset. Offset value, in time * quanta, defining the distance between the start of the bit * reception on the RX pin of the transceiver and the SSP - * position such as SSP = @tdcv + @tdco. + * position such that SSP = @tdcv + @tdco. * * If @tdco is zero, then TDC is disabled and both @tdcv and * @tdcf should be ignored. 
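A minimal sketch of the TDC rule spelled out in the can/bittiming.h kernel-doc above, assuming nothing beyond the two documented quantities; the helper name and its standalone parameters are made up for illustration and are not part of the patch:

/* Illustrative only: derive the secondary sample point (SSP), in time
 * quanta, from the measured/configured delay value (tdcv) and the
 * offset (tdco). A zero tdco means TDC is disabled, per the kernel-doc
 * convention above.
 */
static inline unsigned int can_tdc_ssp_example(unsigned int tdcv, unsigned int tdco)
{
	if (!tdco)
		return 0;	/* TDC disabled, no SSP */
	return tdcv + tdco;	/* SSP = TDCV + TDCO */
}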
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 27b275e463da..2413253e54c7 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -32,6 +32,12 @@ enum can_mode { CAN_MODE_SLEEP }; +enum can_termination_gpio { + CAN_TERMINATION_GPIO_DISABLED = 0, + CAN_TERMINATION_GPIO_ENABLED, + CAN_TERMINATION_GPIO_MAX, +}; + /* * CAN common private data */ @@ -55,6 +61,8 @@ struct can_priv { unsigned int termination_const_cnt; const u16 *termination_const; u16 termination; + struct gpio_desc *termination_gpio; + u16 termination_gpio_ohms[CAN_TERMINATION_GPIO_MAX]; enum can_state state; diff --git a/include/linux/can/platform/flexcan.h b/include/linux/can/platform/flexcan.h new file mode 100644 index 000000000000..1b536fb999de --- /dev/null +++ b/include/linux/can/platform/flexcan.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Angelo Dureghello <angelo@kernel-space.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _CAN_PLATFORM_FLEXCAN_H +#define _CAN_PLATFORM_FLEXCAN_H + +struct flexcan_platform_data { + u32 clock_frequency; + u8 clk_src; +}; + +#endif /* _CAN_PLATFORM_FLEXCAN_H */ diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h index 40882df7105e..c11477620403 100644 --- a/include/linux/can/rx-offload.h +++ b/include/linux/can/rx-offload.h @@ -20,6 +20,7 @@ struct can_rx_offload { bool drop); struct sk_buff_head skb_queue; + struct sk_buff_head skb_irq_queue; u32 skb_queue_len_max; unsigned int mb_first; @@ -48,14 +49,11 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload, unsigned int *frame_len_ptr); int can_rx_offload_queue_tail(struct can_rx_offload *offload, struct sk_buff *skb); +void can_rx_offload_irq_finish(struct can_rx_offload *offload); +void can_rx_offload_threaded_irq_finish(struct can_rx_offload *offload); void can_rx_offload_del(struct can_rx_offload *offload); void can_rx_offload_enable(struct can_rx_offload *offload); -static inline void can_rx_offload_schedule(struct can_rx_offload *offload) -{ - napi_schedule(&offload->napi); -} - static inline void can_rx_offload_disable(struct can_rx_offload *offload) { napi_disable(&offload->napi); diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h index f48d0a31deae..c4fef00abdf3 100644 --- a/include/linux/cdrom.h +++ b/include/linux/cdrom.h @@ -86,11 +86,13 @@ struct cdrom_device_ops { /* play stuff */ int (*audio_ioctl) (struct cdrom_device_info *,unsigned int, void *); -/* driver specifications */ - const int capability; /* capability flags */ /* handle uniform packets for scsi type devices (scsi,atapi) */ int (*generic_packet) (struct cdrom_device_info *, struct packet_command *); + int (*read_cdda_bpc)(struct cdrom_device_info *cdi, void __user *ubuf, + u32 lba, u32 nframes, u8 *last_sense); +/* driver specifications */ + const int capability; /* capability flags */ }; int cdrom_multisession(struct cdrom_device_info *cdi, diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index e41a811026f6..bc2699feddbe 100644 --- a/include/linux/ceph/ceph_fs.h +++ 
b/include/linux/ceph/ceph_fs.h @@ -299,6 +299,7 @@ enum { CEPH_SESSION_FLUSHMSG_ACK, CEPH_SESSION_FORCE_RO, CEPH_SESSION_REJECT, + CEPH_SESSION_REQUEST_FLUSH_MDLOG, }; extern const char *ceph_session_op_name(int op); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index d83b829305c0..f59c875271a0 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -342,7 +342,7 @@ struct clk_fixed_rate { unsigned long flags; }; -#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0) +#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0) extern const struct clk_ops clk_fixed_rate_ops; struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev, @@ -1001,6 +1001,12 @@ struct clk_hw *devm_clk_hw_register_fixed_factor(struct device *dev, * CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are * used for the divider register. Setting this flag makes the register * accesses big endian. + * CLK_FRAC_DIVIDER_POWER_OF_TWO_PS - By default the resulting fraction might + * be saturated and the caller will get quite far from the good enough + * approximation. Instead the caller may require, by setting this flag, + * to shift left by a few bits in case, when the asked one is quite small + * to satisfy the desired range of denominator. It assumes that on the + * caller's side the power-of-two capable prescaler exists. */ struct clk_fractional_divider { struct clk_hw hw; @@ -1022,8 +1028,8 @@ struct clk_fractional_divider { #define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0) #define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1) +#define CLK_FRAC_DIVIDER_POWER_OF_TWO_PS BIT(2) -extern const struct clk_ops clk_fractional_divider_ops; struct clk *clk_register_fractional_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 mshift, u8 mwidth, u8 nshift, u8 nwidth, @@ -1069,9 +1075,9 @@ struct clk_multiplier { #define to_clk_multiplier(_hw) container_of(_hw, struct clk_multiplier, hw) -#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) +#define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) #define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) -#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2) +#define CLK_MULTIPLIER_BIG_ENDIAN BIT(2) extern const struct clk_ops clk_multiplier_ops; diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h index a4f82e836a7c..ccb3f034bfa9 100644 --- a/include/linux/clk/at91_pmc.h +++ b/include/linux/clk/at91_pmc.h @@ -137,6 +137,32 @@ #define AT91_PMC_PLLADIV2_ON (1 << 12) #define AT91_PMC_H32MXDIV BIT(24) +#define AT91_PMC_MCR_V2 0x30 /* Master Clock Register [SAMA7G5 only] */ +#define AT91_PMC_MCR_V2_ID_MSK (0xF) +#define AT91_PMC_MCR_V2_ID(_id) ((_id) & AT91_PMC_MCR_V2_ID_MSK) +#define AT91_PMC_MCR_V2_CMD (1 << 7) +#define AT91_PMC_MCR_V2_DIV (7 << 8) +#define AT91_PMC_MCR_V2_DIV1 (0 << 8) +#define AT91_PMC_MCR_V2_DIV2 (1 << 8) +#define AT91_PMC_MCR_V2_DIV4 (2 << 8) +#define AT91_PMC_MCR_V2_DIV8 (3 << 8) +#define AT91_PMC_MCR_V2_DIV16 (4 << 8) +#define AT91_PMC_MCR_V2_DIV32 (5 << 8) +#define AT91_PMC_MCR_V2_DIV64 (6 << 8) +#define AT91_PMC_MCR_V2_DIV3 (7 << 8) +#define AT91_PMC_MCR_V2_CSS (0x1F << 16) +#define AT91_PMC_MCR_V2_CSS_MD_SLCK (0 << 16) +#define AT91_PMC_MCR_V2_CSS_TD_SLCK (1 << 16) +#define AT91_PMC_MCR_V2_CSS_MAINCK (2 << 16) +#define AT91_PMC_MCR_V2_CSS_MCK0 (3 << 16) +#define AT91_PMC_MCR_V2_CSS_SYSPLL (5 << 16) +#define AT91_PMC_MCR_V2_CSS_DDRPLL (6 << 16) +#define AT91_PMC_MCR_V2_CSS_IMGPLL (7 << 16) +#define AT91_PMC_MCR_V2_CSS_BAUDPLL (8 << 16) +#define AT91_PMC_MCR_V2_CSS_AUDIOPLL (9 << 16) 
+#define AT91_PMC_MCR_V2_CSS_ETHPLL (10 << 16) +#define AT91_PMC_MCR_V2_EN (1 << 28) + #define AT91_PMC_XTALF 0x34 /* Main XTAL Frequency Register [SAMA7G5 only] */ #define AT91_PMC_USB 0x38 /* USB Clock Register [some SAM9 only] */ diff --git a/include/linux/cmdline-parser.h b/include/linux/cmdline-parser.h deleted file mode 100644 index 68a541807bdf..000000000000 --- a/include/linux/cmdline-parser.h +++ /dev/null @@ -1,46 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Parsing command line, get the partitions information. - * - * Written by Cai Zhiyong <caizhiyong@huawei.com> - * - */ -#ifndef CMDLINEPARSEH -#define CMDLINEPARSEH - -#include <linux/blkdev.h> -#include <linux/fs.h> -#include <linux/slab.h> - -/* partition flags */ -#define PF_RDONLY 0x01 /* Device is read only */ -#define PF_POWERUP_LOCK 0x02 /* Always locked after reset */ - -struct cmdline_subpart { - char name[BDEVNAME_SIZE]; /* partition name, such as 'rootfs' */ - sector_t from; - sector_t size; - int flags; - struct cmdline_subpart *next_subpart; -}; - -struct cmdline_parts { - char name[BDEVNAME_SIZE]; /* block device, such as 'mmcblk0' */ - unsigned int nr_subparts; - struct cmdline_subpart *subpart; - struct cmdline_parts *next_parts; -}; - -void cmdline_parts_free(struct cmdline_parts **parts); - -int cmdline_parts_parse(struct cmdline_parts **parts, const char *cmdline); - -struct cmdline_parts *cmdline_parts_find(struct cmdline_parts *parts, - const char *bdev); - -int cmdline_parts_set(struct cmdline_parts *parts, sector_t disk_size, - int slot, - int (*add_part)(int, struct cmdline_subpart *, void *), - void *param); - -#endif /* CMDLINEPARSEH */ diff --git a/include/linux/compaction.h b/include/linux/compaction.h index c24098c7acca..34bce35c808d 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -84,6 +84,8 @@ static inline unsigned long compact_gap(unsigned int order) extern unsigned int sysctl_compaction_proactiveness; extern int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos); +extern int compaction_proactiveness_sysctl_handler(struct ctl_table *table, + int write, void *buffer, size_t *length, loff_t *ppos); extern int sysctl_extfrag_threshold; extern int sysctl_compact_unevictable_allowed; diff --git a/include/linux/compat.h b/include/linux/compat.h index c270124e4402..1c758b0e0359 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h @@ -20,11 +20,8 @@ #include <linux/unistd.h> #include <asm/compat.h> - -#ifdef CONFIG_COMPAT #include <asm/siginfo.h> #include <asm/signal.h> -#endif #ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER /* @@ -95,8 +92,6 @@ struct compat_iovec { compat_size_t iov_len; }; -#ifdef CONFIG_COMPAT - #ifndef compat_user_stack_pointer #define compat_user_stack_pointer() current_user_stack_pointer() #endif @@ -131,9 +126,11 @@ struct compat_tms { #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) +#ifndef compat_sigset_t typedef struct { compat_sigset_word sig[_COMPAT_NSIG_WORDS]; } compat_sigset_t; +#endif int set_compat_user_sigmask(const compat_sigset_t __user *umask, size_t sigsetsize); @@ -384,6 +381,7 @@ struct compat_keyctl_kdf_params { __u32 __spare[8]; }; +struct compat_stat; struct compat_statfs; struct compat_statfs64; struct compat_old_linux_dirent; @@ -397,14 +395,6 @@ struct compat_kexec_segment; struct compat_mq_attr; struct compat_msgbuf; -#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) - -#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, 
BITS_PER_COMPAT_LONG) - -long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, - unsigned long bitmap_size); -long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, - unsigned long bitmap_size); void copy_siginfo_to_external32(struct compat_siginfo *to, const struct kernel_siginfo *from); int copy_siginfo_from_user32(kernel_siginfo_t *to, @@ -428,7 +418,7 @@ put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set, unsigned int size) { /* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */ -#ifdef __BIG_ENDIAN +#if defined(__BIG_ENDIAN) && defined(CONFIG_64BIT) compat_sigset_t v; switch (_NSIG_WORDS) { case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3]; @@ -521,8 +511,6 @@ extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, struct epoll_event; /* fortunately, this one is fixed-layout */ -extern void __user *compat_alloc_user_space(unsigned long len); - int compat_restore_altstack(const compat_stack_t __user *uss); int __compat_save_altstack(compat_stack_t __user *, unsigned long); #define unsafe_compat_save_altstack(uss, sp, label) do { \ @@ -809,26 +797,6 @@ asmlinkage long compat_sys_execve(const char __user *filename, const compat_uptr /* mm/fadvise.c: No generic prototype for fadvise64_64 */ /* mm/, CONFIG_MMU only */ -asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len, - compat_ulong_t mode, - compat_ulong_t __user *nmask, - compat_ulong_t maxnode, compat_ulong_t flags); -asmlinkage long compat_sys_get_mempolicy(int __user *policy, - compat_ulong_t __user *nmask, - compat_ulong_t maxnode, - compat_ulong_t addr, - compat_ulong_t flags); -asmlinkage long compat_sys_set_mempolicy(int mode, compat_ulong_t __user *nmask, - compat_ulong_t maxnode); -asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, - compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, - const compat_ulong_t __user *new_nodes); -asmlinkage long compat_sys_move_pages(pid_t pid, compat_ulong_t nr_pages, - __u32 __user *pages, - const int __user *nodes, - int __user *status, - int flags); - asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig, struct compat_siginfo __user *uinfo); @@ -929,17 +897,6 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args); #endif /* CONFIG_ARCH_HAS_SYSCALL_WRAPPER */ - -/* - * For most but not all architectures, "am I in a compat syscall?" and - * "am I a compat task?" are the same question. For architectures on which - * they aren't the same question, arch code can override in_compat_syscall. - */ - -#ifndef in_compat_syscall -static inline bool in_compat_syscall(void) { return is_compat_task(); } -#endif - /** * ns_to_old_timeval32 - Compat version of ns_to_timeval * @nsec: the nanoseconds value to be converted @@ -969,6 +926,17 @@ int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz, int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz, struct compat_statfs64 __user * buf); +#ifdef CONFIG_COMPAT + +/* + * For most but not all architectures, "am I in a compat syscall?" and + * "am I a compat task?" are the same question. For architectures on which + * they aren't the same question, arch code can override in_compat_syscall. 
+ */ +#ifndef in_compat_syscall +static inline bool in_compat_syscall(void) { return is_compat_task(); } +#endif + #else /* !CONFIG_COMPAT */ #define is_compat_task() (0) @@ -978,6 +946,15 @@ static inline bool in_compat_syscall(void) { return false; } #endif /* CONFIG_COMPAT */ +#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) + +#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG) + +long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, + unsigned long bitmap_size); +long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, + unsigned long bitmap_size); + /* * Some legacy ABIs like the i386 one use less than natural alignment for 64-bit * types, and will need special compat treatment for that. Most architectures diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index cb9217fc60af..21c36b69eb06 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -43,9 +43,6 @@ #define __compiletime_object_size(obj) __builtin_object_size(obj, 0) -#define __compiletime_warning(message) __attribute__((__warning__(message))) -#define __compiletime_error(message) __attribute__((__error__(message))) - #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) #define __latent_entropy __attribute__((latent_entropy)) #endif diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 2487be0e7199..8f2106e9e5c1 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h @@ -30,6 +30,7 @@ # define __GCC4_has_attribute___assume_aligned__ 1 # define __GCC4_has_attribute___copy__ 0 # define __GCC4_has_attribute___designated_init__ 0 +# define __GCC4_has_attribute___error__ 1 # define __GCC4_has_attribute___externally_visible__ 1 # define __GCC4_has_attribute___no_caller_saved_registers__ 0 # define __GCC4_has_attribute___noclone__ 1 @@ -37,7 +38,9 @@ # define __GCC4_has_attribute___nonstring__ 0 # define __GCC4_has_attribute___no_sanitize_address__ 1 # define __GCC4_has_attribute___no_sanitize_undefined__ 1 +# define __GCC4_has_attribute___no_sanitize_coverage__ 0 # define __GCC4_has_attribute___fallthrough__ 0 +# define __GCC4_has_attribute___warning__ 1 #endif /* @@ -138,6 +141,17 @@ #endif /* + * Optional: only supported since clang >= 14.0 + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-error-function-attribute + */ +#if __has_attribute(__error__) +# define __compiletime_error(msg) __attribute__((__error__(msg))) +#else +# define __compiletime_error(msg) +#endif + +/* * Optional: not supported by clang * * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute @@ -299,6 +313,17 @@ #define __must_check __attribute__((__warn_unused_result__)) /* + * Optional: only supported since clang >= 14.0 + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warning-function-attribute + */ +#if __has_attribute(__warning__) +# define __compiletime_warning(msg) __attribute__((__warning__(msg))) +#else +# define __compiletime_warning(msg) +#endif + +/* * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index e4ea86fc584d..b6ff83a714ca 100644 --- a/include/linux/compiler_types.h +++ 
b/include/linux/compiler_types.h @@ -294,12 +294,6 @@ struct ftrace_likely_data { #ifndef __compiletime_object_size # define __compiletime_object_size(obj) -1 #endif -#ifndef __compiletime_warning -# define __compiletime_warning(message) -#endif -#ifndef __compiletime_error -# define __compiletime_error(message) -#endif #ifdef __OPTIMIZE__ # define __compiletime_assert(condition, msg, prefix, suffix) \ diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 85008a65e21f..93a2922b7653 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -220,6 +220,10 @@ struct coresight_sysfs_link { * @nr_links: number of sysfs links created to other components from this * device. These will appear in the "connections" group. * @has_conns_grp: Have added a "connections" group for sysfs links. + * @feature_csdev_list: List of complex feature programming added to the device. + * @config_csdev_list: List of system configurations added to the device. + * @cscfg_csdev_lock: Protect the lists of configurations and features. + * @active_cscfg_ctxt: Context information for current active system configuration. */ struct coresight_device { struct coresight_platform_data *pdata; @@ -241,6 +245,11 @@ struct coresight_device { int nr_links; bool has_conns_grp; bool ect_enabled; /* true only if associated ect device is enabled */ + /* system configuration and feature lists */ + struct list_head feature_csdev_list; + struct list_head config_csdev_list; + spinlock_t cscfg_csdev_lock; + void *active_cscfg_ctxt; }; /* diff --git a/include/linux/counter.h b/include/linux/counter.h index 9dbd5df4cd34..d16ce2819b48 100644 --- a/include/linux/counter.h +++ b/include/linux/counter.h @@ -162,15 +162,15 @@ struct counter_count_ext { void *priv; }; -enum counter_count_function { - COUNTER_COUNT_FUNCTION_INCREASE = 0, - COUNTER_COUNT_FUNCTION_DECREASE, - COUNTER_COUNT_FUNCTION_PULSE_DIRECTION, - COUNTER_COUNT_FUNCTION_QUADRATURE_X1_A, - COUNTER_COUNT_FUNCTION_QUADRATURE_X1_B, - COUNTER_COUNT_FUNCTION_QUADRATURE_X2_A, - COUNTER_COUNT_FUNCTION_QUADRATURE_X2_B, - COUNTER_COUNT_FUNCTION_QUADRATURE_X4 +enum counter_function { + COUNTER_FUNCTION_INCREASE = 0, + COUNTER_FUNCTION_DECREASE, + COUNTER_FUNCTION_PULSE_DIRECTION, + COUNTER_FUNCTION_QUADRATURE_X1_A, + COUNTER_FUNCTION_QUADRATURE_X1_B, + COUNTER_FUNCTION_QUADRATURE_X2_A, + COUNTER_FUNCTION_QUADRATURE_X2_B, + COUNTER_FUNCTION_QUADRATURE_X4 }; /** @@ -192,7 +192,7 @@ struct counter_count { const char *name; size_t function; - const enum counter_count_function *functions_list; + const enum counter_function *functions_list; size_t num_functions; struct counter_synapse *synapses; @@ -290,16 +290,16 @@ struct counter_device_state { const struct attribute_group **groups; }; -enum counter_signal_value { - COUNTER_SIGNAL_LOW = 0, - COUNTER_SIGNAL_HIGH +enum counter_signal_level { + COUNTER_SIGNAL_LEVEL_LOW, + COUNTER_SIGNAL_LEVEL_HIGH, }; /** * struct counter_ops - Callbacks from driver * @signal_read: optional read callback for Signal attribute. The read - * value of the respective Signal should be passed back via - * the val parameter. + * level of the respective Signal should be passed back via + * the level parameter. * @count_read: optional read callback for Count attribute. The read * value of the respective Count should be passed back via * the val parameter. 
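As a hedged illustration of the renamed enum (the matching signal_read() prototype change lands in the next hunk), a driver callback would now report a level rather than a value. example_signal_read() and read_pin_high() are hypothetical names and the <linux/counter.h> types are assumed; only the enum constants and the callback shape follow the header change:

/* Hypothetical driver callback sketch: reads a hardware pin (stubbed by
 * the assumed read_pin_high() helper) and reports it through the new
 * enum counter_signal_level.
 */
static int example_signal_read(struct counter_device *counter,
			       struct counter_signal *signal,
			       enum counter_signal_level *level)
{
	bool high = read_pin_high(counter, signal);	/* assumed hardware access */

	*level = high ? COUNTER_SIGNAL_LEVEL_HIGH : COUNTER_SIGNAL_LEVEL_LOW;
	return 0;
}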
@@ -324,7 +324,7 @@ enum counter_signal_value { struct counter_ops { int (*signal_read)(struct counter_device *counter, struct counter_signal *signal, - enum counter_signal_value *val); + enum counter_signal_level *level); int (*count_read)(struct counter_device *counter, struct counter_count *count, unsigned long *val); int (*count_write)(struct counter_device *counter, diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 94a578a96202..9cf51e41e697 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -143,12 +143,6 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; } static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { } #endif /* !CONFIG_HOTPLUG_CPU */ -/* Wrappers which go away once all code is converted */ -static inline void cpu_hotplug_begin(void) { cpus_write_lock(); } -static inline void cpu_hotplug_done(void) { cpus_write_unlock(); } -static inline void get_online_cpus(void) { cpus_read_lock(); } -static inline void put_online_cpus(void) { cpus_read_unlock(); } - #ifdef CONFIG_PM_SLEEP_SMP extern int freeze_secondary_cpus(int primary); extern void thaw_secondary_cpus(void); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 9fd719475fcd..ff88bb3e44fc 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -9,10 +9,14 @@ #define _LINUX_CPUFREQ_H #include <linux/clk.h> +#include <linux/cpu.h> #include <linux/cpumask.h> #include <linux/completion.h> #include <linux/kobject.h> #include <linux/notifier.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/pm_opp.h> #include <linux/pm_qos.h> #include <linux/spinlock.h> #include <linux/sysfs.h> @@ -365,14 +369,17 @@ struct cpufreq_driver { int (*suspend)(struct cpufreq_policy *policy); int (*resume)(struct cpufreq_policy *policy); - /* Will be called after the driver is fully initialized */ - void (*ready)(struct cpufreq_policy *policy); - struct freq_attr **attr; /* platform specific boost support code */ bool boost_enabled; int (*set_boost)(struct cpufreq_policy *policy, int state); + + /* + * Set by drivers that want to register with the energy model after the + * policy is properly initialized, but before the governor is started. 
+ */ + void (*register_em)(struct cpufreq_policy *policy); }; /* flags */ @@ -995,6 +1002,55 @@ static inline int cpufreq_table_count_valid_entries(const struct cpufreq_policy return count; } + +static inline int parse_perf_domain(int cpu, const char *list_name, + const char *cell_name) +{ + struct device_node *cpu_np; + struct of_phandle_args args; + int ret; + + cpu_np = of_cpu_device_node_get(cpu); + if (!cpu_np) + return -ENODEV; + + ret = of_parse_phandle_with_args(cpu_np, list_name, cell_name, 0, + &args); + if (ret < 0) + return ret; + + of_node_put(cpu_np); + + return args.args[0]; +} + +static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name, + const char *cell_name, struct cpumask *cpumask) +{ + int target_idx; + int cpu, ret; + + ret = parse_perf_domain(pcpu, list_name, cell_name); + if (ret < 0) + return ret; + + target_idx = ret; + cpumask_set_cpu(pcpu, cpumask); + + for_each_possible_cpu(cpu) { + if (cpu == pcpu) + continue; + + ret = parse_perf_domain(pcpu, list_name, cell_name); + if (ret < 0) + continue; + + if (target_idx == ret) + cpumask_set_cpu(cpu, cpumask); + } + + return target_idx; +} #else static inline int cpufreq_boost_trigger_state(int state) { @@ -1014,6 +1070,12 @@ static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) { return false; } + +static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_name, + const char *cell_name, struct cpumask *cpumask) +{ + return -EOPNOTSUPP; +} #endif #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) @@ -1035,7 +1097,6 @@ void arch_set_freq_scale(const struct cpumask *cpus, { } #endif - /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; @@ -1046,4 +1107,10 @@ unsigned int cpufreq_generic_get(unsigned int cpu); void cpufreq_generic_init(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table, unsigned int transition_latency); + +static inline void cpufreq_register_em_with_opp(struct cpufreq_policy *policy) +{ + dev_pm_opp_of_register_em(get_cpu_device(policy->cpu), + policy->related_cpus); +} #endif /* _LINUX_CPUFREQ_H */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f39b34b13871..832d8a74fa59 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -22,8 +22,42 @@ * AP_ACTIVE AP_ACTIVE */ +/* + * CPU hotplug states. The state machine invokes the installed state + * startup callbacks sequentially from CPUHP_OFFLINE + 1 to CPUHP_ONLINE + * during a CPU online operation. During a CPU offline operation the + * installed teardown callbacks are invoked in the reverse order from + * CPU_ONLINE - 1 down to CPUHP_OFFLINE. + * + * The state space has three sections: PREPARE, STARTING and ONLINE. + * + * PREPARE: The callbacks are invoked on a control CPU before the + * hotplugged CPU is started up or after the hotplugged CPU has died. + * + * STARTING: The callbacks are invoked on the hotplugged CPU from the low level + * hotplug startup/teardown code with interrupts disabled. + * + * ONLINE: The callbacks are invoked on the hotplugged CPU from the per CPU + * hotplug thread with interrupts and preemption enabled. + * + * Adding explicit states to this enum is only necessary when: + * + * 1) The state is within the STARTING section + * + * 2) The state has ordering constraints vs. other states in the + * same section. 
+ * + * If neither #1 nor #2 apply, please use the dynamic state space when + * setting up a state by using CPUHP_PREPARE_DYN or CPUHP_PREPARE_ONLINE + * for the @state argument of the setup function. + * + * See Documentation/core-api/cpu_hotplug.rst for further information and + * examples. + */ enum cpuhp_state { CPUHP_INVALID = -1, + + /* PREPARE section invoked on a control CPU */ CPUHP_OFFLINE = 0, CPUHP_CREATE_THREADS, CPUHP_PERF_PREPARE, @@ -46,12 +80,14 @@ enum cpuhp_state { CPUHP_ARM_OMAP_WAKE_DEAD, CPUHP_IRQ_POLL_DEAD, CPUHP_BLOCK_SOFTIRQ_DEAD, + CPUHP_BIO_DEAD, CPUHP_ACPI_CPUDRV_DEAD, CPUHP_S390_PFAULT_DEAD, CPUHP_BLK_MQ_DEAD, CPUHP_FS_BUFF_DEAD, CPUHP_PRINTK_DEAD, CPUHP_MM_MEMCQ_DEAD, + CPUHP_XFS_DEAD, CPUHP_PERCPU_CNT_DEAD, CPUHP_RADIX_DEAD, CPUHP_PAGE_ALLOC, @@ -93,6 +129,11 @@ enum cpuhp_state { CPUHP_BP_PREPARE_DYN, CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20, CPUHP_BRINGUP_CPU, + + /* + * STARTING section invoked on the hotplugged CPU in low level + * bringup and teardown code. + */ CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, CPUHP_AP_SCHED_STARTING, @@ -153,6 +194,8 @@ enum cpuhp_state { CPUHP_AP_ARM_CACHE_B15_RAC_DYING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, + + /* Online section invoked on the hotplugged CPU from the hotplug thread */ CPUHP_AP_ONLINE_IDLE, CPUHP_AP_SCHED_WAIT_EMPTY, CPUHP_AP_SMPBOOT_THREADS, @@ -214,14 +257,15 @@ int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, int (*teardown)(unsigned int cpu), bool multi_instance); /** - * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks + * cpuhp_setup_state - Setup hotplug state callbacks with calling the @startup + * callback * @state: The state for which the calls are installed * @name: Name of the callback (will be used in debug output) - * @startup: startup callback function - * @teardown: teardown callback function + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required * - * Installs the callback functions and invokes the startup callback on - * the present cpus which have already reached the @state. + * Installs the callback functions and invokes the @startup callback on + * the online cpus which have already reached the @state. */ static inline int cpuhp_setup_state(enum cpuhp_state state, const char *name, @@ -231,6 +275,18 @@ static inline int cpuhp_setup_state(enum cpuhp_state state, return __cpuhp_setup_state(state, name, true, startup, teardown, false); } +/** + * cpuhp_setup_state_cpuslocked - Setup hotplug state callbacks with calling + * @startup callback from a cpus_read_lock() + * held region + * @state: The state for which the calls are installed + * @name: Name of the callback (will be used in debug output) + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required + * + * Same as cpuhp_setup_state() except that it must be invoked from within a + * cpus_read_lock() held region. + */ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name, int (*startup)(unsigned int cpu), @@ -242,14 +298,14 @@ static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state, /** * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the - * callbacks + * @startup callback * @state: The state for which the calls are installed * @name: Name of the callback. 
- * @startup: startup callback function - * @teardown: teardown callback function + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required * - * Same as @cpuhp_setup_state except that no calls are executed are invoked - * during installation of this callback. NOP if SMP=n or HOTPLUG_CPU=n. + * Same as cpuhp_setup_state() except that the @startup callback is not + * invoked during installation. NOP if SMP=n or HOTPLUG_CPU=n. */ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state, const char *name, @@ -260,6 +316,19 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state, false); } +/** + * cpuhp_setup_state_nocalls_cpuslocked - Setup hotplug state callbacks without + * invoking the @startup callback from + * a cpus_read_lock() held region + * callbacks + * @state: The state for which the calls are installed + * @name: Name of the callback. + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required + * + * Same as cpuhp_setup_state_nocalls() except that it must be invoked from + * within a cpus_read_lock() held region. + */ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state, const char *name, int (*startup)(unsigned int cpu), @@ -273,13 +342,13 @@ static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state, * cpuhp_setup_state_multi - Add callbacks for multi state * @state: The state for which the calls are installed * @name: Name of the callback. - * @startup: startup callback function - * @teardown: teardown callback function + * @startup: startup callback function or NULL if not required + * @teardown: teardown callback function or NULL if not required * * Sets the internal multi_instance flag and prepares a state to work as a multi * instance callback. No callbacks are invoked at this point. The callbacks are * invoked once an instance for this state are registered via - * @cpuhp_state_add_instance or @cpuhp_state_add_instance_nocalls. + * cpuhp_state_add_instance() or cpuhp_state_add_instance_nocalls() */ static inline int cpuhp_setup_state_multi(enum cpuhp_state state, const char *name, @@ -304,9 +373,10 @@ int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state, * @state: The state for which the instance is installed * @node: The node for this individual state. * - * Installs the instance for the @state and invokes the startup callback on - * the present cpus which have already reached the @state. The @state must have - * been earlier marked as multi-instance by @cpuhp_setup_state_multi. + * Installs the instance for the @state and invokes the registered startup + * callback on the online cpus which have already reached the @state. The + * @state must have been earlier marked as multi-instance by + * cpuhp_setup_state_multi(). */ static inline int cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node) @@ -320,8 +390,9 @@ static inline int cpuhp_state_add_instance(enum cpuhp_state state, * @state: The state for which the instance is installed * @node: The node for this individual state. * - * Installs the instance for the @state The @state must have been earlier - * marked as multi-instance by @cpuhp_setup_state_multi. + * Installs the instance for the @state. The @state must have been earlier + * marked as multi-instance by cpuhp_setup_state_multi. NOP if SMP=n or + * HOTPLUG_CPU=n. 
 */ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state, struct hlist_node *node) @@ -329,6 +400,17 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state, return __cpuhp_state_add_instance(state, node, false); } +/** + * cpuhp_state_add_instance_nocalls_cpuslocked - Add an instance for a state + * without invoking the startup + * callback from a cpus_read_lock() + * held region. + * @state: The state for which the instance is installed + * @node: The node for this individual state. + * + * Same as cpuhp_state_add_instance_nocalls() except that it must be + * invoked from within a cpus_read_lock() held region. + */ static inline int cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state, struct hlist_node *node) @@ -344,7 +426,7 @@ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke); * @state: The state for which the calls are removed * * Removes the callback functions and invokes the teardown callback on - * the present cpus which have already reached the @state. + * the online cpus which have already reached the @state. */ static inline void cpuhp_remove_state(enum cpuhp_state state) { @@ -353,7 +435,7 @@ static inline void cpuhp_remove_state(enum cpuhp_state state) /** * cpuhp_remove_state_nocalls - Remove hotplug state callbacks without invoking - * teardown + * the teardown callback * @state: The state for which the calls are removed */ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state) @@ -361,6 +443,14 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state) __cpuhp_remove_state(state, false); } +/** + * cpuhp_remove_state_nocalls_cpuslocked - Remove hotplug state callbacks without invoking + * teardown from a cpus_read_lock() held region. + * @state: The state for which the calls are removed + * + * Same as cpuhp_remove_state_nocalls() except that it must be invoked + * from within a cpus_read_lock() held region. + */ static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state) { __cpuhp_remove_state_cpuslocked(state, false); @@ -388,8 +478,8 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state, * @state: The state from which the instance is removed * @node: The node for this individual state. * - * Removes the instance and invokes the teardown callback on the present cpus - * which have already reached the @state. + * Removes the instance and invokes the teardown callback on the online cpus + * which have already reached @state. */ static inline int cpuhp_state_remove_instance(enum cpuhp_state state, struct hlist_node *node) @@ -399,7 +489,7 @@ static inline int cpuhp_state_remove_instance(enum cpuhp_state state, /** * cpuhp_state_remove_instance_nocalls - Remove hotplug instance from state - * without invoking the reatdown callback + * without invoking the teardown callback * @state: The state from which the instance is removed + * @node: The node for this individual state. 
* diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index f3689a52bfd0..5d4d07a9e1ed 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -983,6 +983,44 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask) nr_cpu_ids); } +/** + * cpumap_print_bitmask_to_buf - copies the cpumask into the buffer as + * hex values of cpumask + * + * @buf: the buffer to copy into + * @mask: the cpumask to copy + * @off: in the string from which we are copying, we copy to @buf + * @count: the maximum number of bytes to print + * + * The function prints the cpumask into the buffer as hex values of + * cpumask; Typically used by bin_attribute to export cpumask bitmask + * ABI. + * + * Returns the length of how many bytes have been copied. + */ +static inline ssize_t +cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask, + loff_t off, size_t count) +{ + return bitmap_print_bitmask_to_buf(buf, cpumask_bits(mask), + nr_cpu_ids, off, count); +} + +/** + * cpumap_print_list_to_buf - copies the cpumask into the buffer as + * comma-separated list of cpus + * + * Everything is same with the above cpumap_print_bitmask_to_buf() + * except the print format. + */ +static inline ssize_t +cpumap_print_list_to_buf(char *buf, const struct cpumask *mask, + loff_t off, size_t count) +{ + return bitmap_print_list_to_buf(buf, cpumask_bits(mask), + nr_cpu_ids, off, count); +} + #if NR_CPUS <= BITS_PER_LONG #define CPU_MASK_ALL \ (cpumask_t) { { \ diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 04c20de66afc..d2b9c41c8edf 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -15,6 +15,7 @@ #include <linux/cpumask.h> #include <linux/nodemask.h> #include <linux/mm.h> +#include <linux/mmu_context.h> #include <linux/jump_label.h> #ifdef CONFIG_CPUSETS @@ -58,7 +59,7 @@ extern void cpuset_wait_for_hotplug(void); extern void cpuset_read_lock(void); extern void cpuset_read_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); -extern void cpuset_cpus_allowed_fallback(struct task_struct *p); +extern bool cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); #define cpuset_current_mems_allowed (current->mems_allowed) void cpuset_init_current_mems_allowed(void); @@ -184,11 +185,12 @@ static inline void cpuset_read_unlock(void) { } static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) { - cpumask_copy(mask, cpu_possible_mask); + cpumask_copy(mask, task_cpu_possible_mask(p)); } -static inline void cpuset_cpus_allowed_fallback(struct task_struct *p) +static inline bool cpuset_cpus_allowed_fallback(struct task_struct *p) { + return false; } static inline nodemask_t cpuset_mems_allowed(struct task_struct *p) diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index a5192b718dbe..2618577a4d6d 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h @@ -10,13 +10,14 @@ #include <linux/pgtable.h> /* for pgprot_t */ -#ifdef CONFIG_CRASH_DUMP +/* For IS_ENABLED(CONFIG_CRASH_DUMP) */ #define ELFCORE_ADDR_MAX (-1ULL) #define ELFCORE_ADDR_ERR (-2ULL) extern unsigned long long elfcorehdr_addr; extern unsigned long long elfcorehdr_size; +#ifdef CONFIG_CRASH_DUMP extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); extern void elfcorehdr_free(unsigned long long addr); extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); diff --git a/include/linux/damon.h 
b/include/linux/damon.h new file mode 100644 index 000000000000..d68b67b8d458 --- /dev/null +++ b/include/linux/damon.h @@ -0,0 +1,268 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * DAMON api + * + * Author: SeongJae Park <sjpark@amazon.de> + */ + +#ifndef _DAMON_H_ +#define _DAMON_H_ + +#include <linux/mutex.h> +#include <linux/time64.h> +#include <linux/types.h> + +/* Minimal region size. Every damon_region is aligned by this. */ +#define DAMON_MIN_REGION PAGE_SIZE + +/** + * struct damon_addr_range - Represents an address region of [@start, @end). + * @start: Start address of the region (inclusive). + * @end: End address of the region (exclusive). + */ +struct damon_addr_range { + unsigned long start; + unsigned long end; +}; + +/** + * struct damon_region - Represents a monitoring target region. + * @ar: The address range of the region. + * @sampling_addr: Address of the sample for the next access check. + * @nr_accesses: Access frequency of this region. + * @list: List head for siblings. + */ +struct damon_region { + struct damon_addr_range ar; + unsigned long sampling_addr; + unsigned int nr_accesses; + struct list_head list; +}; + +/** + * struct damon_target - Represents a monitoring target. + * @id: Unique identifier for this target. + * @nr_regions: Number of monitoring target regions of this target. + * @regions_list: Head of the monitoring target regions of this target. + * @list: List head for siblings. + * + * Each monitoring context could have multiple targets. For example, a context + * for virtual memory address spaces could have multiple target processes. The + * @id of each target should be unique among the targets of the context. For + * example, in the virtual address monitoring context, it could be a pidfd or + * an address of an mm_struct. + */ +struct damon_target { + unsigned long id; + unsigned int nr_regions; + struct list_head regions_list; + struct list_head list; +}; + +struct damon_ctx; + +/** + * struct damon_primitive Monitoring primitives for given use cases. + * + * @init: Initialize primitive-internal data structures. + * @update: Update primitive-internal data structures. + * @prepare_access_checks: Prepare next access check of target regions. + * @check_accesses: Check the accesses to target regions. + * @reset_aggregated: Reset aggregated accesses monitoring results. + * @target_valid: Determine if the target is valid. + * @cleanup: Clean up the context. + * + * DAMON can be extended for various address spaces and usages. For this, + * users should register the low level primitives for their target address + * space and usecase via the &damon_ctx.primitive. Then, the monitoring thread + * (&damon_ctx.kdamond) calls @init and @prepare_access_checks before starting + * the monitoring, @update after each &damon_ctx.primitive_update_interval, and + * @check_accesses, @target_valid and @prepare_access_checks after each + * &damon_ctx.sample_interval. Finally, @reset_aggregated is called after each + * &damon_ctx.aggr_interval. + * + * @init should initialize primitive-internal data structures. For example, + * this could be used to construct proper monitoring target regions and link + * those to @damon_ctx.adaptive_targets. + * @update should update the primitive-internal data structures. For example, + * this could be used to update monitoring target regions for current status. + * @prepare_access_checks should manipulate the monitoring regions to be + * prepared for the next access check. 
+ * @check_accesses should check the accesses to each region that made after the + * last preparation and update the number of observed accesses of each region. + * It should also return max number of observed accesses that made as a result + * of its update. The value will be used for regions adjustment threshold. + * @reset_aggregated should reset the access monitoring results that aggregated + * by @check_accesses. + * @target_valid should check whether the target is still valid for the + * monitoring. + * @cleanup is called from @kdamond just before its termination. + */ +struct damon_primitive { + void (*init)(struct damon_ctx *context); + void (*update)(struct damon_ctx *context); + void (*prepare_access_checks)(struct damon_ctx *context); + unsigned int (*check_accesses)(struct damon_ctx *context); + void (*reset_aggregated)(struct damon_ctx *context); + bool (*target_valid)(void *target); + void (*cleanup)(struct damon_ctx *context); +}; + +/* + * struct damon_callback Monitoring events notification callbacks. + * + * @before_start: Called before starting the monitoring. + * @after_sampling: Called after each sampling. + * @after_aggregation: Called after each aggregation. + * @before_terminate: Called before terminating the monitoring. + * @private: User private data. + * + * The monitoring thread (&damon_ctx.kdamond) calls @before_start and + * @before_terminate just before starting and finishing the monitoring, + * respectively. Therefore, those are good places for installing and cleaning + * @private. + * + * The monitoring thread calls @after_sampling and @after_aggregation for each + * of the sampling intervals and aggregation intervals, respectively. + * Therefore, users can safely access the monitoring results without additional + * protection. For the reason, users are recommended to use these callback for + * the accesses to the results. + * + * If any callback returns non-zero, monitoring stops. + */ +struct damon_callback { + void *private; + + int (*before_start)(struct damon_ctx *context); + int (*after_sampling)(struct damon_ctx *context); + int (*after_aggregation)(struct damon_ctx *context); + int (*before_terminate)(struct damon_ctx *context); +}; + +/** + * struct damon_ctx - Represents a context for each monitoring. This is the + * main interface that allows users to set the attributes and get the results + * of the monitoring. + * + * @sample_interval: The time between access samplings. + * @aggr_interval: The time between monitor results aggregations. + * @primitive_update_interval: The time between monitoring primitive updates. + * + * For each @sample_interval, DAMON checks whether each region is accessed or + * not. It aggregates and keeps the access information (number of accesses to + * each region) for @aggr_interval time. DAMON also checks whether the target + * memory regions need update (e.g., by ``mmap()`` calls from the application, + * in case of virtual memory monitoring) and applies the changes for each + * @primitive_update_interval. All time intervals are in micro-seconds. + * Please refer to &struct damon_primitive and &struct damon_callback for more + * detail. + * + * @kdamond: Kernel thread who does the monitoring. + * @kdamond_stop: Notifies whether kdamond should stop. + * @kdamond_lock: Mutex for the synchronizations with @kdamond. + * + * For each monitoring context, one kernel thread for the monitoring is + * created. The pointer to the thread is stored in @kdamond. 
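As a rough, hypothetical usage sketch of the API declared further down in this header (interval and region numbers are purely illustrative, error unwinding abbreviated), a virtual-address-space monitoring context would be assembled like this:

#include <linux/damon.h>

static struct damon_ctx *example_ctx;

static int example_start_monitoring(unsigned long target_id)
{
        int err;

        example_ctx = damon_new_ctx();
        if (!example_ctx)
                return -ENOMEM;

        damon_va_set_primitives(example_ctx);	/* CONFIG_DAMON_VADDR primitives */

        /* 5 ms sampling, 100 ms aggregation, 1 s primitive update, 10..1000 regions */
        err = damon_set_attrs(example_ctx, 5000, 100000, 1000000, 10, 1000);
        if (!err)
                err = damon_set_targets(example_ctx, &target_id, 1);
        if (!err)
                err = damon_start(&example_ctx, 1);
        if (err)
                damon_destroy_ctx(example_ctx);
        return err;
}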
+ * + * Once started, the monitoring thread runs until explicitly required to be + * terminated or every monitoring target is invalid. The validity of the + * targets is checked via the &damon_primitive.target_valid of @primitive. The + * termination can also be explicitly requested by writing non-zero to + * @kdamond_stop. The thread sets @kdamond to NULL when it terminates. + * Therefore, users can know whether the monitoring is ongoing or terminated by + * reading @kdamond. Reads and writes to @kdamond and @kdamond_stop from + * outside of the monitoring thread must be protected by @kdamond_lock. + * + * Note that the monitoring thread protects only @kdamond and @kdamond_stop via + * @kdamond_lock. Accesses to other fields must be protected by themselves. + * + * @primitive: Set of monitoring primitives for given use cases. + * @callback: Set of callbacks for monitoring events notifications. + * + * @min_nr_regions: The minimum number of adaptive monitoring regions. + * @max_nr_regions: The maximum number of adaptive monitoring regions. + * @adaptive_targets: Head of monitoring targets (&damon_target) list. + */ +struct damon_ctx { + unsigned long sample_interval; + unsigned long aggr_interval; + unsigned long primitive_update_interval; + +/* private: internal use only */ + struct timespec64 last_aggregation; + struct timespec64 last_primitive_update; + +/* public: */ + struct task_struct *kdamond; + bool kdamond_stop; + struct mutex kdamond_lock; + + struct damon_primitive primitive; + struct damon_callback callback; + + unsigned long min_nr_regions; + unsigned long max_nr_regions; + struct list_head adaptive_targets; +}; + +#define damon_next_region(r) \ + (container_of(r->list.next, struct damon_region, list)) + +#define damon_prev_region(r) \ + (container_of(r->list.prev, struct damon_region, list)) + +#define damon_for_each_region(r, t) \ + list_for_each_entry(r, &t->regions_list, list) + +#define damon_for_each_region_safe(r, next, t) \ + list_for_each_entry_safe(r, next, &t->regions_list, list) + +#define damon_for_each_target(t, ctx) \ + list_for_each_entry(t, &(ctx)->adaptive_targets, list) + +#define damon_for_each_target_safe(t, next, ctx) \ + list_for_each_entry_safe(t, next, &(ctx)->adaptive_targets, list) + +#ifdef CONFIG_DAMON + +struct damon_region *damon_new_region(unsigned long start, unsigned long end); +inline void damon_insert_region(struct damon_region *r, + struct damon_region *prev, struct damon_region *next, + struct damon_target *t); +void damon_add_region(struct damon_region *r, struct damon_target *t); +void damon_destroy_region(struct damon_region *r, struct damon_target *t); + +struct damon_target *damon_new_target(unsigned long id); +void damon_add_target(struct damon_ctx *ctx, struct damon_target *t); +void damon_free_target(struct damon_target *t); +void damon_destroy_target(struct damon_target *t); +unsigned int damon_nr_regions(struct damon_target *t); + +struct damon_ctx *damon_new_ctx(void); +void damon_destroy_ctx(struct damon_ctx *ctx); +int damon_set_targets(struct damon_ctx *ctx, + unsigned long *ids, ssize_t nr_ids); +int damon_set_attrs(struct damon_ctx *ctx, unsigned long sample_int, + unsigned long aggr_int, unsigned long primitive_upd_int, + unsigned long min_nr_reg, unsigned long max_nr_reg); +int damon_nr_running_ctxs(void); + +int damon_start(struct damon_ctx **ctxs, int nr_ctxs); +int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); + +#endif /* CONFIG_DAMON */ + +#ifdef CONFIG_DAMON_VADDR + +/* Monitoring primitives for virtual memory 
address spaces */ +void damon_va_init(struct damon_ctx *ctx); +void damon_va_update(struct damon_ctx *ctx); +void damon_va_prepare_access_checks(struct damon_ctx *ctx); +unsigned int damon_va_check_accesses(struct damon_ctx *ctx); +bool damon_va_target_valid(void *t); +void damon_va_cleanup(struct damon_ctx *ctx); +void damon_va_set_primitives(struct damon_ctx *ctx); + +#endif /* CONFIG_DAMON_VADDR */ + +#endif /* _DAMON_H */ diff --git a/include/linux/dax.h b/include/linux/dax.h index b52f084aa643..2619d94c308d 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -41,7 +41,6 @@ struct dax_operations { extern struct attribute_group dax_attribute_group; #if IS_ENABLED(CONFIG_DAX) -struct dax_device *dax_get_by_host(const char *host); struct dax_device *alloc_dax(void *private, const char *host, const struct dax_operations *ops, unsigned long flags); void put_dax(struct dax_device *dax_dev); @@ -58,8 +57,6 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev) { __set_dax_synchronous(dax_dev); } -bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, - int blocksize, sector_t start, sector_t len); /* * Check if given mapping is supported by the file / underlying device. */ @@ -73,10 +70,6 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, return dax_synchronous(dax_dev); } #else -static inline struct dax_device *dax_get_by_host(const char *host) -{ - return NULL; -} static inline struct dax_device *alloc_dax(void *private, const char *host, const struct dax_operations *ops, unsigned long flags) { @@ -106,12 +99,6 @@ static inline bool dax_synchronous(struct dax_device *dax_dev) static inline void set_dax_synchronous(struct dax_device *dax_dev) { } -static inline bool dax_supported(struct dax_device *dax_dev, - struct block_device *bdev, int blocksize, sector_t start, - sector_t len) -{ - return false; -} static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, struct dax_device *dax_dev) { @@ -122,22 +109,12 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, struct writeback_control; int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff); #if IS_ENABLED(CONFIG_FS_DAX) -bool __bdev_dax_supported(struct block_device *bdev, int blocksize); -static inline bool bdev_dax_supported(struct block_device *bdev, int blocksize) -{ - return __bdev_dax_supported(bdev, blocksize); -} - -bool __generic_fsdax_supported(struct dax_device *dax_dev, +bool generic_fsdax_supported(struct dax_device *dax_dev, struct block_device *bdev, int blocksize, sector_t start, sector_t sectors); -static inline bool generic_fsdax_supported(struct dax_device *dax_dev, - struct block_device *bdev, int blocksize, sector_t start, - sector_t sectors) -{ - return __generic_fsdax_supported(dax_dev, bdev, blocksize, start, - sectors); -} + +bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, + int blocksize, sector_t start, sector_t len); static inline void fs_put_dax(struct dax_device *dax_dev) { @@ -153,15 +130,11 @@ struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t st dax_entry_t dax_lock_page(struct page *page); void dax_unlock_page(struct page *page, dax_entry_t cookie); #else -static inline bool bdev_dax_supported(struct block_device *bdev, - int blocksize) -{ - return false; -} +#define generic_fsdax_supported NULL -static inline bool generic_fsdax_supported(struct dax_device *dax_dev, +static inline bool dax_supported(struct dax_device *dax_dev, struct 
block_device *bdev, int blocksize, sector_t start, - sector_t sectors) + sector_t len) { return false; } diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index edb5c186b0b7..3f49e65169c6 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h @@ -3,8 +3,7 @@ #define __LINUX_DEBUG_LOCKING_H #include <linux/atomic.h> -#include <linux/bug.h> -#include <linux/printk.h> +#include <linux/cache.h> struct task_struct; diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h index 82d3d46005a1..8904063d4c9f 100644 --- a/include/linux/dev_printk.h +++ b/include/linux/dev_printk.h @@ -38,8 +38,8 @@ __printf(3, 4) __cold int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); __printf(3, 4) __cold -void dev_printk(const char *level, const struct device *dev, - const char *fmt, ...); +void _dev_printk(const char *level, const struct device *dev, + const char *fmt, ...); __printf(2, 3) __cold void _dev_emerg(const struct device *dev, const char *fmt, ...); __printf(2, 3) __cold @@ -69,7 +69,7 @@ static inline void __dev_printk(const char *level, const struct device *dev, struct va_format *vaf) {} static inline __printf(3, 4) -void dev_printk(const char *level, const struct device *dev, +void _dev_printk(const char *level, const struct device *dev, const char *fmt, ...) {} @@ -98,24 +98,56 @@ void _dev_info(const struct device *dev, const char *fmt, ...) #endif /* + * Need to take variadic arguments even though we don't use them, as dev_fmt() + * may only just have been expanded and may result in multiple arguments. + */ +#define dev_printk_index_emit(level, fmt, ...) \ + printk_index_subsys_emit("%s %s: ", level, fmt) + +#define dev_printk_index_wrap(_p_func, level, dev, fmt, ...) \ + ({ \ + dev_printk_index_emit(level, fmt); \ + _p_func(dev, fmt, ##__VA_ARGS__); \ + }) + +/* + * Some callsites directly call dev_printk rather than going through the + * dev_<level> infrastructure, so we need to emit here as well as inside those + * level-specific macros. Only one index entry will be produced, either way, + * since dev_printk's `fmt` isn't known at compile time if going through the + * dev_<level> macros. + * + * dev_fmt() isn't called for dev_printk when used directly, as it's used by + * the dev_<level> macros internally which already have dev_fmt() processed. + * + * We also can't use dev_printk_index_wrap directly, because we have a separate + * level to process. + */ +#define dev_printk(level, dev, fmt, ...) \ + ({ \ + dev_printk_index_emit(level, fmt); \ + _dev_printk(level, dev, fmt, ##__VA_ARGS__); \ + }) + +/* * #defines for all the dev_<level> macros to prefix with whatever * possible use of #define dev_fmt(fmt) ... */ -#define dev_emerg(dev, fmt, ...) \ - _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_crit(dev, fmt, ...) \ - _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_alert(dev, fmt, ...) \ - _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_err(dev, fmt, ...) \ - _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_warn(dev, fmt, ...) \ - _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_notice(dev, fmt, ...) \ - _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__) -#define dev_info(dev, fmt, ...) \ - _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_emerg(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_emerg, KERN_EMERG, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_crit(dev, fmt, ...) 
\ + dev_printk_index_wrap(_dev_crit, KERN_CRIT, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_alert(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_alert, KERN_ALERT, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_err(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_err, KERN_ERR, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_warn(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_warn, KERN_WARNING, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_notice(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_notice, KERN_NOTICE, dev, dev_fmt(fmt), ##__VA_ARGS__) +#define dev_info(dev, fmt, ...) \ + dev_printk_index_wrap(_dev_info, KERN_INFO, dev, dev_fmt(fmt), ##__VA_ARGS__) #if defined(CONFIG_DYNAMIC_DEBUG) || \ (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 7457d49acf9a..114553b487ef 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -31,7 +31,7 @@ enum dm_queue_mode { DM_TYPE_DAX_BIO_BASED = 3, }; -typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; +typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE, STATUSTYPE_IMA } status_type_t; union map_info { void *ptr; @@ -151,7 +151,6 @@ typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff, size_t nr_pages); -#define PAGE_SECTORS (PAGE_SIZE / 512) void dm_error(const char *message); @@ -603,6 +602,10 @@ void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm); #define DMEMIT(x...) sz += ((sz >= maxlen) ? \ 0 : scnprintf(result + sz, maxlen - sz, x)) +#define DMEMIT_TARGET_NAME_VERSION(y) \ + DMEMIT("target_name=%s,target_version=%u.%u.%u", \ + (y)->name, (y)->version[0], (y)->version[1], (y)->version[2]) + /* * Definitions of return values from target end_io function. */ diff --git a/include/linux/device.h b/include/linux/device.h index 59940f1744c1..e270cb740b9e 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -407,6 +407,7 @@ struct dev_links_info { * @em_pd: device's energy model performance domain * @pins: For device pin management. * See Documentation/driver-api/pin-control.rst for details. + * @msi_lock: Lock to protect MSI mask cache and mask register * @msi_list: Hosts MSI descriptors * @msi_domain: The generic MSI domain this device is using. * @numa_node: NUMA node this device is close to. @@ -423,6 +424,7 @@ struct dev_links_info { * @dma_pools: Dma pools (if dma'ble device). * @dma_mem: Internal for coherent mem override. * @cma_area: Contiguous memory area for dma allocations + * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use. * @archdata: For arch-specific additions. * @of_node: Associated device tree node. * @fwnode: Associated device node supplied by platform firmware. 
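For reference, the dev_<level> wrappers above are normally paired with a per-file dev_fmt() prefix; a hypothetical driver (names and message invented for illustration) would emit an indexed, prefixed message like so:

#define dev_fmt(fmt) "mydrv: " fmt	/* must be defined before dev_printk.h is included */

#include <linux/device.h>

static int mydrv_enable(struct device *dev)
{
        int err = -ENXIO;

        /* Expands through dev_printk_index_wrap(_dev_err, KERN_ERR, ...), so the
         * format string is also recorded in the printk index. */
        dev_err(dev, "failed to enable device: %d\n", err);
        return err;
}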
@@ -506,6 +508,7 @@ struct device { struct dev_pin_info *pins; #endif #ifdef CONFIG_GENERIC_MSI_IRQ + raw_spinlock_t msi_lock; struct list_head msi_list; #endif #ifdef CONFIG_DMA_OPS @@ -532,6 +535,9 @@ struct device { struct cma *cma_area; /* contiguous memory area for dma allocations */ #endif +#ifdef CONFIG_SWIOTLB + struct io_tlb_mem *dma_io_tlb_mem; +#endif /* arch specific additions */ struct dev_archdata archdata; diff --git a/include/linux/dfl.h b/include/linux/dfl.h index 6cc10982351a..431636a0dc78 100644 --- a/include/linux/dfl.h +++ b/include/linux/dfl.h @@ -38,6 +38,7 @@ struct dfl_device { int id; u16 type; u16 feature_id; + u8 revision; struct resource mmio_res; int *irqs; unsigned int num_irqs; diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h index 758ca4694257..24607dc3c2ac 100644 --- a/include/linux/dma-iommu.h +++ b/include/linux/dma-iommu.h @@ -20,6 +20,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain); /* Setup call for arch DMA mapping code */ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit); +int iommu_dma_init_fq(struct iommu_domain *domain); /* The DMA API isn't _quite_ the whole story, though... */ /* @@ -54,6 +55,11 @@ static inline void iommu_setup_dma_ops(struct device *dev, u64 dma_base, { } +static inline int iommu_dma_init_fq(struct iommu_domain *domain) +{ + return -EINVAL; +} + static inline int iommu_get_dma_cookie(struct iommu_domain *domain) { return -ENODEV; diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 0d53a96a3d64..0d5b06b3a4a6 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -41,8 +41,9 @@ struct dma_map_ops { size_t size, enum dma_data_direction dir, unsigned long attrs); /* - * map_sg returns 0 on error and a value > 0 on success. - * It should never return a value < 0. + * map_sg should return a negative error code on error. See + * dma_map_sgtable() for a list of appropriate error codes + * and their meanings. 
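Following the reworded map_sg contract above, a DMA-ops implementation now reports mapping failures as a negative errno rather than returning 0; a minimal sketch (mydrv_map_one() is a hypothetical helper):

static int mydrv_map_sg(struct device *dev, struct scatterlist *sgl,
                        int nents, enum dma_data_direction dir,
                        unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                dma_addr_t addr = mydrv_map_one(dev, sg, dir);	/* hypothetical */

                if (addr == DMA_MAPPING_ERROR)
                        return -EIO;	/* negative errno instead of 0 on failure */
                sg_dma_address(sg) = addr;
                sg_dma_len(sg) = sg->length;
        }
        return nents;	/* number of mapped entries on success, as before */
}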
*/ int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); @@ -170,13 +171,6 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr); int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, size_t size, int *ret); - -void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle); -int dma_release_from_global_coherent(int order, void *vaddr); -int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, - size_t size, int *ret); - #else static inline int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, dma_addr_t device_addr, size_t size) @@ -186,7 +180,16 @@ static inline int dma_declare_coherent_memory(struct device *dev, #define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0) #define dma_release_from_dev_coherent(dev, order, vaddr) (0) #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0) +#endif /* CONFIG_DMA_DECLARE_COHERENT */ +#ifdef CONFIG_DMA_GLOBAL_POOL +void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle); +int dma_release_from_global_coherent(int order, void *vaddr); +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr, + size_t size, int *ret); +int dma_init_global_coherent(phys_addr_t phys_addr, size_t size); +#else static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle) { @@ -201,7 +204,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, { return 0; } -#endif /* CONFIG_DMA_DECLARE_COHERENT */ +#endif /* CONFIG_DMA_GLOBAL_POOL */ /* * This is the actual return value from the ->alloc_noncontiguous method. 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 183e7103a66d..dca2b1355bb1 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -105,11 +105,13 @@ dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, unsigned long attrs); void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs); -int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs); +unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, unsigned long attrs); +int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs); dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, unsigned long attrs); void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size, @@ -164,8 +166,9 @@ static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, unsigned long attrs) { } -static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, unsigned long attrs) +static inline unsigned int dma_map_sg_attrs(struct device *dev, + struct scatterlist *sg, int nents, enum dma_data_direction dir, + unsigned long attrs) { return 0; } @@ -174,6 +177,11 @@ static inline void dma_unmap_sg_attrs(struct device *dev, unsigned long attrs) { } +static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, + enum dma_data_direction dir, unsigned long attrs) +{ + return -EOPNOTSUPP; +} static inline dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr, size_t size, enum dma_data_direction dir, unsigned long attrs) @@ -344,34 +352,6 @@ static inline void dma_sync_single_range_for_device(struct device *dev, } /** - * dma_map_sgtable - Map the given buffer for DMA - * @dev: The device for which to perform the DMA operation - * @sgt: The sg_table object describing the buffer - * @dir: DMA direction - * @attrs: Optional DMA attributes for the map operation - * - * Maps a buffer described by a scatterlist stored in the given sg_table - * object for the @dir DMA operation by the @dev device. After success the - * ownership for the buffer is transferred to the DMA domain. One has to - * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the - * ownership of the buffer back to the CPU domain before touching the - * buffer by the CPU. - * - * Returns 0 on success or -EINVAL on error during mapping the buffer. 
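With dma_map_sgtable() now a real function (declared earlier in this diff) instead of the inline wrapper removed here, callers keep the same pattern but receive the underlying error code directly; a minimal sketch, assuming an already-populated sg_table:

#include <linux/dma-mapping.h>

static int example_map_buffer(struct device *dev, struct sg_table *sgt)
{
        int ret;

        ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        if (ret)
                return ret;	/* negative errno from the mapping layer */

        /* ... program sgt->sgl / sgt->nents into the hardware ... */

        dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
        return 0;
}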
- */ -static inline int dma_map_sgtable(struct device *dev, struct sg_table *sgt, - enum dma_data_direction dir, unsigned long attrs) -{ - int nents; - - nents = dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs); - if (nents <= 0) - return -EINVAL; - sgt->nents = nents; - return 0; -} - -/** * dma_unmap_sgtable - Unmap the given buffer for DMA * @dev: The device for which to perform the DMA operation * @sgt: The sg_table object describing the buffer diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 93c3ca5fdafd..e5c2c9e71bf1 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -380,6 +380,7 @@ enum dma_slave_buswidth { DMA_SLAVE_BUSWIDTH_16_BYTES = 16, DMA_SLAVE_BUSWIDTH_32_BYTES = 32, DMA_SLAVE_BUSWIDTH_64_BYTES = 64, + DMA_SLAVE_BUSWIDTH_128_BYTES = 128, }; /** @@ -398,7 +399,7 @@ enum dma_slave_buswidth { * @src_addr_width: this is the width in bytes of the source (RX) * register where DMA data shall be read. If the source * is memory this may be ignored depending on architecture. - * Legal values: 1, 2, 3, 4, 8, 16, 32, 64. + * Legal values: 1, 2, 3, 4, 8, 16, 32, 64, 128. * @dst_addr_width: same as src_addr_width but for destination * target (TX) mutatis mutandis. * @src_maxburst: the maximum number of words (note: words, as in diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index 1587961f1a7b..c7fa4a3498fe 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -11,60 +11,48 @@ struct dsa_switch; struct sk_buff; struct net_device; -struct packet_type; -struct dsa_8021q_context; -struct dsa_8021q_crosschip_link { +struct dsa_tag_8021q_vlan { struct list_head list; int port; - struct dsa_8021q_context *other_ctx; - int other_port; + u16 vid; refcount_t refcount; }; -struct dsa_8021q_ops { - int (*vlan_add)(struct dsa_switch *ds, int port, u16 vid, u16 flags); - int (*vlan_del)(struct dsa_switch *ds, int port, u16 vid); -}; - struct dsa_8021q_context { - const struct dsa_8021q_ops *ops; struct dsa_switch *ds; - struct list_head crosschip_links; + struct list_head vlans; /* EtherType of RX VID, used for filtering on master interface */ __be16 proto; }; -#define DSA_8021Q_N_SUBVLAN 8 - -int dsa_8021q_setup(struct dsa_8021q_context *ctx, bool enabled); +int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto); -int dsa_8021q_crosschip_bridge_join(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port); - -int dsa_8021q_crosschip_bridge_leave(struct dsa_8021q_context *ctx, int port, - struct dsa_8021q_context *other_ctx, - int other_port); +void dsa_tag_8021q_unregister(struct dsa_switch *ds); struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); -void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, - int *subvlan); +void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id); + +int dsa_tag_8021q_bridge_tx_fwd_offload(struct dsa_switch *ds, int port, + struct net_device *br, + int bridge_num); + +void dsa_tag_8021q_bridge_tx_fwd_unoffload(struct dsa_switch *ds, int port, + struct net_device *br, + int bridge_num); + +u16 dsa_8021q_bridge_tx_fwd_offload_vid(int bridge_num); u16 dsa_8021q_tx_vid(struct dsa_switch *ds, int port); u16 dsa_8021q_rx_vid(struct dsa_switch *ds, int port); -u16 dsa_8021q_rx_vid_subvlan(struct dsa_switch *ds, int port, u16 subvlan); - int dsa_8021q_rx_switch_id(u16 vid); int dsa_8021q_rx_source_port(u16 vid); -u16 dsa_8021q_rx_subvlan(u16 vid); - 
bool vid_is_dsa_8021q_rxvlan(u16 vid); bool vid_is_dsa_8021q_txvlan(u16 vid); diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h index b6089b88314c..171106202fe5 100644 --- a/include/linux/dsa/sja1105.h +++ b/include/linux/dsa/sja1105.h @@ -16,6 +16,8 @@ #define ETH_P_SJA1105_META 0x0008 #define ETH_P_SJA1110 0xdadc +#define SJA1105_DEFAULT_VLAN (VLAN_N_VID - 1) + /* IEEE 802.3 Annex 57A: Slow Protocols PDUs (01:80:C2:xx:xx:xx) */ #define SJA1105_LINKLOCAL_FILTER_A 0x0180C2000000ull #define SJA1105_LINKLOCAL_FILTER_A_MASK 0xFFFFFF000000ull @@ -59,14 +61,12 @@ struct sja1105_skb_cb { ((struct sja1105_skb_cb *)((skb)->cb)) struct sja1105_port { - u16 subvlan_map[DSA_8021Q_N_SUBVLAN]; struct kthread_worker *xmit_worker; struct kthread_work xmit_work; struct sk_buff_head xmit_queue; struct sja1105_tagger_data *data; struct dsa_port *dp; bool hwts_tx_en; - u16 xmit_tpid; }; enum sja1110_meta_tstamp { @@ -89,4 +89,22 @@ static inline void sja1110_process_meta_tstamp(struct dsa_switch *ds, int port, #endif /* IS_ENABLED(CONFIG_NET_DSA_SJA1105_PTP) */ +#if IS_ENABLED(CONFIG_NET_DSA_SJA1105) + +extern const struct dsa_switch_ops sja1105_switch_ops; + +static inline bool dsa_port_is_sja1105(struct dsa_port *dp) +{ + return dp->ds->ops == &sja1105_switch_ops; +} + +#else + +static inline bool dsa_port_is_sja1105(struct dsa_port *dp) +{ + return false; +} + +#endif + #endif /* _NET_DSA_SJA1105_H */ diff --git a/include/linux/edac.h b/include/linux/edac.h index 76d3562d3006..4207d06996a4 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h @@ -184,6 +184,7 @@ static inline char *mc_event_error_type(const unsigned int err_type) * @MEM_DDR5: Unbuffered DDR5 RAM * @MEM_NVDIMM: Non-volatile RAM * @MEM_WIO2: Wide I/O 2. + * @MEM_HBM2: High bandwidth Memory Gen 2. */ enum mem_type { MEM_EMPTY = 0, @@ -212,6 +213,7 @@ enum mem_type { MEM_DDR5, MEM_NVDIMM, MEM_WIO2, + MEM_HBM2, }; #define MEM_FLAG_EMPTY BIT(MEM_EMPTY) @@ -239,6 +241,7 @@ enum mem_type { #define MEM_FLAG_DDR5 BIT(MEM_DDR5) #define MEM_FLAG_NVDIMM BIT(MEM_NVDIMM) #define MEM_FLAG_WIO2 BIT(MEM_WIO2) +#define MEM_FLAG_HBM2 BIT(MEM_HBM2) /** * enum edac_type - Error Detection and Correction capabilities and mode diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h index 3f221dbf5f95..39dcadd492b5 100644 --- a/include/linux/energy_model.h +++ b/include/linux/energy_model.h @@ -11,7 +11,7 @@ #include <linux/types.h> /** - * em_perf_state - Performance state of a performance domain + * struct em_perf_state - Performance state of a performance domain * @frequency: The frequency in KHz, for consistency with CPUFreq * @power: The power consumed at this level (by 1 CPU or by a registered * device). It can be a total power: static and dynamic. @@ -25,7 +25,7 @@ struct em_perf_state { }; /** - * em_perf_domain - Performance domain + * struct em_perf_domain - Performance domain * @table: List of performance states, in ascending order * @nr_perf_states: Number of performance states * @milliwatts: Flag indicating the power values are in milli-Watts @@ -53,6 +53,22 @@ struct em_perf_domain { #ifdef CONFIG_ENERGY_MODEL #define EM_MAX_POWER 0xFFFF +/* + * Increase resolution of energy estimation calculations for 64-bit + * architectures. The extra resolution improves decision made by EAS for the + * task placement when two Performance Domains might provide similar energy + * estimation values (w/o better resolution the values could be equal). 
+ * + * We increase resolution only if we have enough bits to allow this increased + * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit + * are pretty high and the returns do not justify the increased costs. + */ +#ifdef CONFIG_64BIT +#define em_scale_power(p) ((p) * 1000) +#else +#define em_scale_power(p) (p) +#endif + struct em_data_callback { /** * active_power() - Provide power at the next performance state of @@ -87,12 +103,12 @@ void em_dev_unregister_perf_domain(struct device *dev); /** * em_cpu_energy() - Estimates the energy consumed by the CPUs of a - performance domain + * performance domain * @pd : performance domain for which energy has to be estimated * @max_util : highest utilization among CPUs of the domain * @sum_util : sum of the utilization of all CPUs in the domain * @allowed_cpu_cap : maximum allowed CPU capacity for the @pd, which - might reflect reduced frequency (due to thermal) + * might reflect reduced frequency (due to thermal) * * This function must be used only for CPU devices. There is no validation, * i.e. if the EM is a CPU type and has cpumask allocated. It is called from diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h index 136b8d97d8c0..0d7865a0731c 100644 --- a/include/linux/entry-kvm.h +++ b/include/linux/entry-kvm.h @@ -2,7 +2,11 @@ #ifndef __LINUX_ENTRYKVM_H #define __LINUX_ENTRYKVM_H -#include <linux/entry-common.h> +#include <linux/static_call_types.h> +#include <linux/tracehook.h> +#include <linux/syscalls.h> +#include <linux/seccomp.h> +#include <linux/sched.h> #include <linux/tick.h> /* Transfer to guest mode work */ diff --git a/include/linux/errno.h b/include/linux/errno.h index d73f597a2484..8b0c754bab02 100644 --- a/include/linux/errno.h +++ b/include/linux/errno.h @@ -31,5 +31,6 @@ #define EJUKEBOX 528 /* Request initiated, but will not complete before timeout */ #define EIOCBQUEUED 529 /* iocb queued, will get completion event */ #define ERECALLCONFLICT 530 /* conflict with recalled state */ +#define ENOGRACE 531 /* NFS file lock reclaim refused */ #endif diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 330345b1be54..928c411bd509 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -300,6 +300,18 @@ static inline void ether_addr_copy(u8 *dst, const u8 *src) } /** + * eth_hw_addr_set - Assign Ethernet address to a net_device + * @dev: pointer to net_device structure + * @addr: address to assign + * + * Assign given address to the net_device, addr_assign_type is not changed. 
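Driver-side, the helper documented above replaces open-coded copies into dev_addr; a sketch with a hypothetical driver:

#include <linux/etherdevice.h>

static void mydrv_set_mac(struct net_device *ndev, const u8 *mac_from_hw)
{
        /* was: memcpy(ndev->dev_addr, mac_from_hw, ETH_ALEN); */
        eth_hw_addr_set(ndev, mac_from_hw);	/* addr_assign_type stays untouched */
}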
+ */ +static inline void eth_hw_addr_set(struct net_device *dev, const u8 *addr) +{ + ether_addr_copy(dev->dev_addr, addr); +} + +/** * eth_hw_addr_inherit - Copy dev_addr from another net_device * @dst: pointer to net_device to copy dev_addr to * @src: pointer to net_device to copy dev_addr from diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 232daaec56e4..849524b55d89 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -15,10 +15,9 @@ #include <linux/bitmap.h> #include <linux/compat.h> +#include <linux/netlink.h> #include <uapi/linux/ethtool.h> -#ifdef CONFIG_COMPAT - struct compat_ethtool_rx_flow_spec { u32 flow_type; union ethtool_flow_union h_u; @@ -38,8 +37,6 @@ struct compat_ethtool_rxnfc { u32 rule_locs[]; }; -#endif /* CONFIG_COMPAT */ - #include <linux/rculist.h> /** @@ -176,6 +173,11 @@ extern int __ethtool_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *link_ksettings); +struct kernel_ethtool_coalesce { + u8 use_cqe_mode_tx; + u8 use_cqe_mode_rx; +}; + /** * ethtool_intersect_link_masks - Given two link masks, AND them together * @dst: first mask and where result is stored @@ -215,7 +217,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, #define ETHTOOL_COALESCE_TX_USECS_HIGH BIT(19) #define ETHTOOL_COALESCE_TX_MAX_FRAMES_HIGH BIT(20) #define ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL BIT(21) -#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(21, 0) +#define ETHTOOL_COALESCE_USE_CQE_RX BIT(22) +#define ETHTOOL_COALESCE_USE_CQE_TX BIT(23) +#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(23, 0) #define ETHTOOL_COALESCE_USECS \ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS) @@ -241,6 +245,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, ETHTOOL_COALESCE_RX_USECS_LOW | ETHTOOL_COALESCE_RX_USECS_HIGH | \ ETHTOOL_COALESCE_PKT_RATE_LOW | ETHTOOL_COALESCE_PKT_RATE_HIGH | \ ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL) +#define ETHTOOL_COALESCE_USE_CQE \ + (ETHTOOL_COALESCE_USE_CQE_RX | ETHTOOL_COALESCE_USE_CQE_TX) #define ETHTOOL_STAT_NOT_SET (~0ULL) @@ -606,8 +612,14 @@ struct ethtool_ops { struct ethtool_eeprom *, u8 *); int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); - int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); - int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*get_coalesce)(struct net_device *, + struct ethtool_coalesce *, + struct kernel_ethtool_coalesce *, + struct netlink_ext_ack *); + int (*set_coalesce)(struct net_device *, + struct ethtool_coalesce *, + struct kernel_ethtool_coalesce *, + struct netlink_ext_ack *); void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); int (*set_ringparam)(struct net_device *, diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index fa0a524baed0..305d5f19093b 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h @@ -14,6 +14,7 @@ #include <linux/err.h> #include <linux/percpu-defs.h> #include <linux/percpu.h> +#include <linux/sched.h> /* * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining @@ -43,11 +44,9 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w __u64 *cnt); void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt); -DECLARE_PER_CPU(int, eventfd_wake_count); - -static inline bool eventfd_signal_count(void) +static inline bool eventfd_signal_allowed(void) { - return this_cpu_read(eventfd_wake_count); + return !current->in_eventfd_signal; } #else /* CONFIG_EVENTFD */ @@ -78,9 +77,9 @@ 
static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, return -ENOSYS; } -static inline bool eventfd_signal_count(void) +static inline bool eventfd_signal_allowed(void) { - return false; + return true; } static inline void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h index 593322c946e6..3337745d81bd 100644 --- a/include/linux/eventpoll.h +++ b/include/linux/eventpoll.h @@ -68,4 +68,22 @@ static inline void eventpoll_release(struct file *file) {} #endif +#if defined(CONFIG_ARM) && defined(CONFIG_OABI_COMPAT) +/* ARM OABI has an incompatible struct layout and needs a special handler */ +extern struct epoll_event __user * +epoll_put_uevent(__poll_t revents, __u64 data, + struct epoll_event __user *uevent); +#else +static inline struct epoll_event __user * +epoll_put_uevent(__poll_t revents, __u64 data, + struct epoll_event __user *uevent) +{ + if (__put_user(revents, &uevent->events) || + __put_user(data, &uevent->data)) + return NULL; + + return uevent+1; +} +#endif + #endif /* #ifndef _LINUX_EVENTPOLL_H */ diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index fe848901fcc3..3260fe714846 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h @@ -221,6 +221,8 @@ struct export_operations { #define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply atomic attribute updates */ +#define EXPORT_OP_SYNC_LOCKS (0x20) /* Filesystem can't do + asychronous blocking locks */ unsigned long flags; }; diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index a16dbeced152..eec3b7c40811 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h @@ -27,6 +27,8 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */ #define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME) +#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD) + /* * fanotify_init() flags that require CAP_SYS_ADMIN. * We do not allow unprivileged groups to request permission events. 
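As a sketch of how the epoll_put_uevent() helper above is meant to be consumed by an event-delivery loop (heavily simplified; the real loop lives in fs/eventpoll.c), a NULL return signals a fault while writing the user buffer:

static int example_send_event(struct epoll_event __user *uevent,
                              __poll_t revents, __u64 data)
{
        uevent = epoll_put_uevent(revents, data, uevent);
        if (!uevent)
                return -EFAULT;	/* __put_user() faulted */
        /* uevent now points at the next free slot in the user array */
        return 0;
}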
@@ -35,6 +37,7 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */ */ #define FANOTIFY_ADMIN_INIT_FLAGS (FANOTIFY_PERM_CLASSES | \ FAN_REPORT_TID | \ + FAN_REPORT_PIDFD | \ FAN_UNLIMITED_QUEUE | \ FAN_UNLIMITED_MARKS) diff --git a/include/linux/fiemap.h b/include/linux/fiemap.h index 4e624c466583..c50882f19235 100644 --- a/include/linux/fiemap.h +++ b/include/linux/fiemap.h @@ -18,8 +18,4 @@ int fiemap_prep(struct inode *inode, struct fiemap_extent_info *fieinfo, int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, u64 phys, u64 len, u32 flags); -int generic_block_fiemap(struct inode *inode, - struct fiemap_extent_info *fieinfo, u64 start, u64 len, - get_block_t *get_block); - #endif /* _LINUX_FIEMAP_H 1 */ diff --git a/include/linux/file.h b/include/linux/file.h index 2de2e4613d7b..51e830b4fe3a 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -94,6 +94,9 @@ extern void fd_install(unsigned int fd, struct file *file); extern int __receive_fd(struct file *file, int __user *ufd, unsigned int o_flags); + +extern int receive_fd(struct file *file, unsigned int o_flags); + static inline int receive_fd_user(struct file *file, int __user *ufd, unsigned int o_flags) { @@ -101,10 +104,6 @@ static inline int receive_fd_user(struct file *file, int __user *ufd, return -EFAULT; return __receive_fd(file, ufd, o_flags); } -static inline int receive_fd(struct file *file, unsigned int o_flags) -{ - return __receive_fd(file, NULL, o_flags); -} int receive_fd_replace(int new_fd, struct file *file, unsigned int o_flags); extern void flush_delayed_fput(void); diff --git a/include/linux/filter.h b/include/linux/filter.h index 472f97074da0..4a93c12543ee 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -5,8 +5,6 @@ #ifndef __LINUX_FILTER_H__ #define __LINUX_FILTER_H__ -#include <stdarg.h> - #include <linux/atomic.h> #include <linux/refcount.h> #include <linux/compat.h> @@ -73,6 +71,11 @@ struct ctl_table_header; /* unused opcode to mark call to interpreter with arguments */ #define BPF_CALL_ARGS 0xe0 +/* unused opcode to mark speculation barrier for mitigating + * Speculative Store Bypass + */ +#define BPF_NOSPEC 0xc0 + /* As per nm, we expose JITed images as text (code) section for * kallsyms. That way, tools like perf can find it to match * addresses. @@ -390,6 +393,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn) .off = 0, \ .imm = 0 }) +/* Speculation barrier */ + +#define BPF_ST_NOSPEC() \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_NOSPEC, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = 0 }) + /* Internal classic blocks for direct assignment */ #define __BPF_STMT(CODE, K) \ @@ -559,7 +572,8 @@ struct bpf_prog { kprobe_override:1, /* Do we override a kprobe? */ has_callchain_buf:1, /* callchain buffer allocated? 
*/ enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ - call_get_stack:1; /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ + call_get_func_ip:1; /* Do we call get_func_ip() */ enum bpf_prog_type type; /* Type of BPF program */ enum bpf_attach_type expected_attach_type; /* For some prog types */ u32 len; /* Number of filter blocks */ @@ -584,25 +598,38 @@ struct sk_filter { DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); -#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \ - u32 __ret; \ - cant_migrate(); \ - if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ - struct bpf_prog_stats *__stats; \ - u64 __start = sched_clock(); \ - __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ - __stats = this_cpu_ptr(prog->stats); \ - u64_stats_update_begin(&__stats->syncp); \ - __stats->cnt++; \ - __stats->nsecs += sched_clock() - __start; \ - u64_stats_update_end(&__stats->syncp); \ - } else { \ - __ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \ - } \ - __ret; }) - -#define BPF_PROG_RUN(prog, ctx) \ - __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func) +typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx, + const struct bpf_insn *insnsi, + unsigned int (*bpf_func)(const void *, + const struct bpf_insn *)); + +static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog, + const void *ctx, + bpf_dispatcher_fn dfunc) +{ + u32 ret; + + cant_migrate(); + if (static_branch_unlikely(&bpf_stats_enabled_key)) { + struct bpf_prog_stats *stats; + u64 start = sched_clock(); + + ret = dfunc(ctx, prog->insnsi, prog->bpf_func); + stats = this_cpu_ptr(prog->stats); + u64_stats_update_begin(&stats->syncp); + stats->cnt++; + stats->nsecs += sched_clock() - start; + u64_stats_update_end(&stats->syncp); + } else { + ret = dfunc(ctx, prog->insnsi, prog->bpf_func); + } + return ret; +} + +static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx) +{ + return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func); +} /* * Use in preemptible and therefore migratable context to make sure that @@ -621,7 +648,7 @@ static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog, u32 ret; migrate_disable(); - ret = __BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func); + ret = bpf_prog_run(prog, ctx); migrate_enable(); return ret; } @@ -694,7 +721,7 @@ static inline void bpf_restore_data_end( cb->data_end = saved_data_end; } -static inline u8 *bpf_skb_cb(struct sk_buff *skb) +static inline u8 *bpf_skb_cb(const struct sk_buff *skb) { /* eBPF programs may read/write skb->cb[] area to transfer meta * data between tail calls. 
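The BPF_NOSPEC opcode introduced above comes with a matching BPF_ST_NOSPEC() insn macro; purely as an illustration (ordering and registers invented here, not taken from this patch), a verifier-style patch buffer pairs a sanitized stack store with the barrier:

static void example_emit_sanitized_store(void)
{
        struct bpf_insn patch[] = {
                BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), /* the store being sanitized */
                BPF_ST_NOSPEC(),                                /* speculation barrier */
        };

        /* patch[] would then be spliced into the program by the verifier */
        (void)patch;
}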
Since this also needs to work with @@ -715,8 +742,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb) /* Must be invoked with migration disabled */ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, - struct sk_buff *skb) + const void *ctx) { + const struct sk_buff *skb = ctx; u8 *cb_data = bpf_skb_cb(skb); u8 cb_saved[BPF_SKB_CB_LEN]; u32 res; @@ -726,7 +754,7 @@ static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, memset(cb_data, 0, sizeof(cb_saved)); } - res = BPF_PROG_RUN(prog, skb); + res = bpf_prog_run(prog, skb); if (unlikely(prog->cb_access)) memcpy(cb_data, cb_saved, sizeof(cb_saved)); @@ -760,6 +788,10 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, DECLARE_BPF_DISPATCHER(xdp) +DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); + +u32 xdp_master_redirect(struct xdp_buff *xdp); + static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, struct xdp_buff *xdp) { @@ -767,7 +799,14 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, * under local_bh_disable(), which provides the needed RCU protection * for accessing map entries. */ - return __BPF_PROG_RUN(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp)); + + if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) { + if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev)) + act = xdp_master_redirect(xdp); + } + + return act; } void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog); @@ -1413,7 +1452,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, }; u32 act; - act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); if (act == SK_PASS) { selected_sk = ctx.selected_sk; no_reuseport = ctx.no_reuseport; @@ -1451,7 +1490,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, }; u32 act; - act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, BPF_PROG_RUN); + act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run); if (act == SK_PASS) { selected_sk = ctx.selected_sk; no_reuseport = ctx.no_reuseport; diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 9d1a5c175065..56b426fe020c 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -52,6 +52,10 @@ #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U #define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U +/* Loader commands */ +#define PM_LOAD_PDI 0x701 +#define PDI_SRC_DDR 0xF + /* * Firmware FPGA Manager flags * XILINX_ZYNQMP_PM_FPGA_FULL: FPGA full reconfiguration @@ -411,6 +415,7 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, u32 *value); int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, u32 value); +int zynqmp_pm_load_pdi(const u32 src, const u64 address); #else static inline int zynqmp_pm_get_api_version(u32 *version) { @@ -622,6 +627,11 @@ static inline int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, { return -ENODEV; } + +static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address) +{ + return -ENODEV; +} #endif #endif /* __FIRMWARE_ZYNQMP_H__ */ diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index ec2cd8bfceb0..474c1f506307 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h @@ -110,7 +110,7 @@ struct fpga_image_info { * @initial_header_size: Maximum number of bytes that should be passed into 
write_init * @state: returns an enum value of the FPGA's state * @status: returns status of the FPGA, including reconfiguration error code - * @write_init: prepare the FPGA to receive confuration data + * @write_init: prepare the FPGA to receive configuration data * @write: write count bytes of configuration data to the FPGA * @write_sg: write the scatter list of configuration data to the FPGA * @write_complete: set FPGA to operating state after writing is done diff --git a/include/linux/fs.h b/include/linux/fs.h index 640574294216..e7a633353fd2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -319,6 +319,8 @@ enum rw_hint { /* iocb->ki_waitq is valid */ #define IOCB_WAITQ (1 << 19) #define IOCB_NOIO (1 << 20) +/* can use bio alloc cache */ +#define IOCB_ALLOC_CACHE (1 << 21) struct kiocb { struct file *ki_filp; @@ -436,6 +438,10 @@ int pagecache_write_end(struct file *, struct address_space *mapping, * struct address_space - Contents of a cacheable, mappable object. * @host: Owner, either the inode or the block_device. * @i_pages: Cached pages. + * @invalidate_lock: Guards coherency between page cache contents and + * file offset->disk block mappings in the filesystem during invalidates. + * It is also used to block modification of page cache contents through + * memory mappings. * @gfp_mask: Memory allocation flags to use for allocating pages. * @i_mmap_writable: Number of VM_SHARED mappings. * @nr_thps: Number of THPs in the pagecache (non-shmem only). @@ -453,6 +459,7 @@ int pagecache_write_end(struct file *, struct address_space *mapping, struct address_space { struct inode *host; struct xarray i_pages; + struct rw_semaphore invalidate_lock; gfp_t gfp_mask; atomic_t i_mmap_writable; #ifdef CONFIG_READ_ONLY_THP_FOR_FS @@ -581,6 +588,11 @@ static inline void mapping_allow_writable(struct address_space *mapping) struct posix_acl; #define ACL_NOT_CACHED ((void *)(-1)) +/* + * ACL_DONT_CACHE is for stacked filesystems, that rely on underlying fs to + * cache the ACL. This also means that ->get_acl() can be called in RCU mode + * with the LOOKUP_RCU flag. 
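In practice this means a filesystem's ->get_acl() implementation (which, per the inode_operations change further below, now takes a bool rcu argument) should bail out when called in RCU mode and it cannot proceed without blocking; a hypothetical implementation:

static struct posix_acl *myfs_get_acl(struct inode *inode, int type, bool rcu)
{
        if (rcu)
                return ERR_PTR(-ECHILD);	/* ask the VFS to retry in ref-walk mode */

        /* ... blocking lookup of the on-disk ACL ... */
        return NULL;				/* no ACL present */
}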
+ */ #define ACL_DONT_CACHE ((void *)(-3)) static inline struct posix_acl * @@ -814,9 +826,42 @@ static inline void inode_lock_shared_nested(struct inode *inode, unsigned subcla down_read_nested(&inode->i_rwsem, subclass); } +static inline void filemap_invalidate_lock(struct address_space *mapping) +{ + down_write(&mapping->invalidate_lock); +} + +static inline void filemap_invalidate_unlock(struct address_space *mapping) +{ + up_write(&mapping->invalidate_lock); +} + +static inline void filemap_invalidate_lock_shared(struct address_space *mapping) +{ + down_read(&mapping->invalidate_lock); +} + +static inline int filemap_invalidate_trylock_shared( + struct address_space *mapping) +{ + return down_read_trylock(&mapping->invalidate_lock); +} + +static inline void filemap_invalidate_unlock_shared( + struct address_space *mapping) +{ + up_read(&mapping->invalidate_lock); +} + void lock_two_nondirectories(struct inode *, struct inode*); void unlock_two_nondirectories(struct inode *, struct inode*); +void filemap_invalidate_lock_two(struct address_space *mapping1, + struct address_space *mapping2); +void filemap_invalidate_unlock_two(struct address_space *mapping1, + struct address_space *mapping2); + + /* * NOTE: in a 32bit arch with a preemptable kernel and * an UP compile the i_size_read/write must be atomic @@ -997,6 +1042,7 @@ static inline struct file *get_file(struct file *f) #define FL_UNLOCK_PENDING 512 /* Lease is being broken */ #define FL_OFDLCK 1024 /* lock is "owned" by struct file */ #define FL_LAYOUT 2048 /* outstanding pNFS layout */ +#define FL_RECLAIM 4096 /* reclaiming from a reboot server */ #define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE) @@ -1507,8 +1553,11 @@ struct super_block { /* Number of inodes with nlink == 0 but still referenced */ atomic_long_t s_remove_count; - /* Pending fsnotify inode refs */ - atomic_long_t s_fsnotify_inode_refs; + /* + * Number of inode/mount/sb objects that are being watched, note that + * inodes objects are currently double-accounted. + */ + atomic_long_t s_fsnotify_connectors; /* Being remounted read-only */ int s_readonly_remount; @@ -2065,7 +2114,7 @@ struct inode_operations { struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int); const char * (*get_link) (struct dentry *, struct inode *, struct delayed_call *); int (*permission) (struct user_namespace *, struct inode *, int); - struct posix_acl * (*get_acl)(struct inode *, int); + struct posix_acl * (*get_acl)(struct inode *, int, bool); int (*readlink) (struct dentry *, char __user *,int); @@ -2457,7 +2506,6 @@ static inline void file_accessed(struct file *file) extern int file_modified(struct file *file); -int sync_inode(struct inode *inode, struct writeback_control *wbc); int sync_inode_metadata(struct inode *inode, int wait); struct file_system_type { @@ -2487,6 +2535,7 @@ struct file_system_type { struct lock_class_key i_lock_key; struct lock_class_key i_mutex_key; + struct lock_class_key invalidate_lock_key; struct lock_class_key i_mutex_dir_key; }; @@ -2570,90 +2619,6 @@ extern struct kobject *fs_kobj; #define MAX_RW_COUNT (INT_MAX & PAGE_MASK) -#ifdef CONFIG_MANDATORY_FILE_LOCKING -extern int locks_mandatory_locked(struct file *); -extern int locks_mandatory_area(struct inode *, struct file *, loff_t, loff_t, unsigned char); - -/* - * Candidates for mandatory locking have the setgid bit set - * but no group execute bit - an otherwise meaningless combination. 
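The filemap_invalidate_lock() helpers added above are intended to be taken by filesystems around paths that remove or remap page cache contents; a rough hole-punch style sketch (locking of i_rwsem and the actual block freeing are elided, names hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

static int example_punch_hole(struct inode *inode, loff_t start, loff_t end)
{
        struct address_space *mapping = inode->i_mapping;

        filemap_invalidate_lock(mapping);
        truncate_pagecache_range(inode, start, end);
        /* ... free the underlying blocks ... */
        filemap_invalidate_unlock(mapping);
        return 0;
}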
- */ - -static inline int __mandatory_lock(struct inode *ino) -{ - return (ino->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID; -} - -/* - * ... and these candidates should be on SB_MANDLOCK mounted fs, - * otherwise these will be advisory locks - */ - -static inline int mandatory_lock(struct inode *ino) -{ - return IS_MANDLOCK(ino) && __mandatory_lock(ino); -} - -static inline int locks_verify_locked(struct file *file) -{ - if (mandatory_lock(locks_inode(file))) - return locks_mandatory_locked(file); - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, - struct file *f, - loff_t size) -{ - if (!inode->i_flctx || !mandatory_lock(inode)) - return 0; - - if (size < inode->i_size) { - return locks_mandatory_area(inode, f, size, inode->i_size - 1, - F_WRLCK); - } else { - return locks_mandatory_area(inode, f, inode->i_size, size - 1, - F_WRLCK); - } -} - -#else /* !CONFIG_MANDATORY_FILE_LOCKING */ - -static inline int locks_mandatory_locked(struct file *file) -{ - return 0; -} - -static inline int locks_mandatory_area(struct inode *inode, struct file *filp, - loff_t start, loff_t end, unsigned char type) -{ - return 0; -} - -static inline int __mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int mandatory_lock(struct inode *inode) -{ - return 0; -} - -static inline int locks_verify_locked(struct file *file) -{ - return 0; -} - -static inline int locks_verify_truncate(struct inode *inode, struct file *filp, - size_t size) -{ - return 0; -} - -#endif /* CONFIG_MANDATORY_FILE_LOCKING */ - - #ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { @@ -2786,6 +2751,7 @@ static inline struct file *file_clone_open(struct file *file) extern int filp_close(struct file *, fl_owner_t id); extern struct filename *getname_flags(const char __user *, int, int *); +extern struct filename *getname_uflags(const char __user *, int); extern struct filename *getname(const char __user *); extern struct filename *getname_kernel(const char *); extern void putname(struct filename *name); @@ -2891,6 +2857,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, loff_t end); extern int filemap_check_errors(struct address_space *mapping); extern void __filemap_set_wb_err(struct address_space *mapping, int err); +int filemap_fdatawrite_wbc(struct address_space *mapping, + struct writeback_control *wbc); static inline int filemap_write_and_wait(struct address_space *mapping) { @@ -3055,15 +3023,20 @@ static inline void file_end_write(struct file *file) } /* + * This is used for regular files where some users -- especially the + * currently executed binary in a process, previously handled via + * VM_DENYWRITE -- cannot handle concurrent write (and maybe mmap + * read-write shared) accesses. + * * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. - * This is used for regular files. - * We cannot support write (and maybe mmap read-write shared) accesses and - * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode - * can have the following values: - * 0: no writers, no VM_DENYWRITE mappings - * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist - * > 0: (i_writecount) users are writing to the file. + * deny_write_access() denies write access to a file. + * allow_write_access() re-enables write access to a file. 
+ * + * The i_writecount field of an inode can have the following values: + * 0: no write access, no denied write access + * < 0: (-i_writecount) users that denied write access to the file. + * > 0: (i_writecount) users that have write access to the file. * * Normally we operate on that counter with atomic_{inc,dec} and it's safe * except for the cases where we don't hold i_writecount yet. Then we need to @@ -3246,10 +3219,6 @@ ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb, ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb, struct iov_iter *iter); -/* fs/block_dev.c */ -extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, - int datasync); - /* fs/splice.c */ extern ssize_t generic_file_splice_read(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); @@ -3355,6 +3324,7 @@ extern int page_symlink(struct inode *inode, const char *symname, int len); extern const struct inode_operations page_symlink_inode_operations; extern void kfree_link(void *); void generic_fillattr(struct user_namespace *, struct inode *, struct kstat *); +void generic_fill_statx_attr(struct inode *inode, struct kstat *stat); extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int); extern int vfs_getattr(const struct path *, struct kstat *, u32, unsigned int); void __inode_add_bytes(struct inode *inode, loff_t bytes); @@ -3469,6 +3439,8 @@ extern int buffer_migrate_page_norefs(struct address_space *, #define buffer_migrate_page_norefs NULL #endif +int may_setattr(struct user_namespace *mnt_userns, struct inode *inode, + unsigned int ia_valid); int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *); extern int inode_newsize_ok(const struct inode *, loff_t offset); void setattr_copy(struct user_namespace *, struct inode *inode, @@ -3622,7 +3594,7 @@ int proc_nr_dentry(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); int proc_nr_inodes(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos); -int __init get_filesystem_list(char *buf); +int __init list_bdev_fs_names(char *buf, size_t size); #define __FMODE_EXEC ((__force int) FMODE_EXEC) #define __FMODE_NONOTIFY ((__force int) FMODE_NONOTIFY) diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h index e2bc16300c82..6b54982fc5f3 100644 --- a/include/linux/fs_context.h +++ b/include/linux/fs_context.h @@ -141,6 +141,7 @@ extern int vfs_get_tree(struct fs_context *fc); extern void put_fs_context(struct fs_context *fc); extern int vfs_parse_fs_param_source(struct fs_context *fc, struct fs_parameter *param); +extern void fc_drop_locked(struct fs_context *fc); /* * sget() wrappers to be called from the ->get_tree() op. 
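For context on the invalidate_lock added to struct address_space and the filemap_invalidate_lock*() helpers in the fs.h hunks above: the new kernel-doc says the lock guards coherency between page cache contents and the file's offset->disk block mappings during invalidates. Below is a minimal illustrative sketch of that usage pattern; it is not code from this patch, and example_punch_hole() plus the stubbed extent-removal step are hypothetical.

/*
 * Illustrative only: hold the mapping's invalidate_lock across an operation
 * that changes the offset->block mapping (here a hole punch), so concurrent
 * page faults and readahead cannot repopulate the range with stale data.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static int example_punch_hole(struct inode *inode, loff_t start, loff_t len)
{
	struct address_space *mapping = inode->i_mapping;
	int ret;

	filemap_invalidate_lock(mapping);

	/* Drop the affected page cache range first... */
	truncate_pagecache_range(inode, start, start + len - 1);

	/* ...then update the on-disk mapping (filesystem specific, stubbed here). */
	ret = 0;

	filemap_invalidate_unlock(mapping);
	return ret;
}

The same helpers also have shared (read) variants, which the hunk above shows being used for callers that only need to exclude invalidation rather than perform it.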
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 3235ddbdcc09..8d39491c5f9f 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h @@ -147,7 +147,6 @@ struct fscache_retrieval { fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ void *context; /* netfs read context (pinned) */ struct list_head to_do; /* list of things to be done by the backend */ - unsigned long start_time; /* time at which retrieval started */ atomic_t n_pages; /* number of pages to be retrieved */ }; @@ -385,9 +384,6 @@ struct fscache_object { struct list_head dependents; /* FIFO of dependent objects */ struct list_head dep_link; /* link in parent's dependents list */ struct list_head pending_ops; /* unstarted operations on this object */ -#ifdef CONFIG_FSCACHE_OBJECT_LIST - struct rb_node objlist_link; /* link in global object list */ -#endif pgoff_t store_limit; /* current storage limit */ loff_t store_limit_l; /* current storage limit */ }; diff --git a/include/linux/fscache.h b/include/linux/fscache.h index abc1c4737fb8..a4dab5998613 100644 --- a/include/linux/fscache.h +++ b/include/linux/fscache.h @@ -123,15 +123,17 @@ struct fscache_netfs { * - indices are created on disk just-in-time */ struct fscache_cookie { - atomic_t usage; /* number of users of this cookie */ + refcount_t ref; /* number of users of this cookie */ atomic_t n_children; /* number of children of this cookie */ atomic_t n_active; /* number of active users of netfs ptrs */ + unsigned int debug_id; spinlock_t lock; spinlock_t stores_lock; /* lock on page store tree */ struct hlist_head backing_objects; /* object(s) backing this file/index */ const struct fscache_cookie_def *def; /* definition */ struct fscache_cookie *parent; /* parent of this entry */ struct hlist_bl_node hash_link; /* Link in hash table */ + struct list_head proc_link; /* Link in proc list */ void *netfs_data; /* back pointer to netfs */ struct radix_tree_root stores; /* pages to be stored on this cookie */ #define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */ diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 2ea1387bb497..e912ed9141d9 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -47,27 +47,128 @@ struct fscrypt_name { #define FSCRYPT_SET_CONTEXT_MAX_SIZE 40 #ifdef CONFIG_FS_ENCRYPTION + /* - * fscrypt superblock flags + * If set, the fscrypt bounce page pool won't be allocated (unless another + * filesystem needs it). Set this if the filesystem always uses its own bounce + * pages for writes and therefore won't need the fscrypt bounce page pool. */ #define FS_CFLG_OWN_PAGES (1U << 1) -/* - * crypto operations for filesystems - */ +/* Crypto operations for filesystems */ struct fscrypt_operations { + + /* Set of optional flags; see above for allowed flags */ unsigned int flags; + + /* + * If set, this is a filesystem-specific key description prefix that + * will be accepted for "logon" keys for v1 fscrypt policies, in + * addition to the generic prefix "fscrypt:". This functionality is + * deprecated, so new filesystems shouldn't set this field. + */ const char *key_prefix; + + /* + * Get the fscrypt context of the given inode. + * + * @inode: the inode whose context to get + * @ctx: the buffer into which to get the context + * @len: length of the @ctx buffer in bytes + * + * Return: On success, returns the length of the context in bytes; this + * may be less than @len. 
On failure, returns -ENODATA if the + * inode doesn't have a context, -ERANGE if the context is + * longer than @len, or another -errno code. + */ int (*get_context)(struct inode *inode, void *ctx, size_t len); + + /* + * Set an fscrypt context on the given inode. + * + * @inode: the inode whose context to set. The inode won't already have + * an fscrypt context. + * @ctx: the context to set + * @len: length of @ctx in bytes (at most FSCRYPT_SET_CONTEXT_MAX_SIZE) + * @fs_data: If called from fscrypt_set_context(), this will be the + * value the filesystem passed to fscrypt_set_context(). + * Otherwise (i.e. when called from + * FS_IOC_SET_ENCRYPTION_POLICY) this will be NULL. + * + * i_rwsem will be held for write. + * + * Return: 0 on success, -errno on failure. + */ int (*set_context)(struct inode *inode, const void *ctx, size_t len, void *fs_data); + + /* + * Get the dummy fscrypt policy in use on the filesystem (if any). + * + * Filesystems only need to implement this function if they support the + * test_dummy_encryption mount option. + * + * Return: A pointer to the dummy fscrypt policy, if the filesystem is + * mounted with test_dummy_encryption; otherwise NULL. + */ const union fscrypt_policy *(*get_dummy_policy)(struct super_block *sb); + + /* + * Check whether a directory is empty. i_rwsem will be held for write. + */ bool (*empty_dir)(struct inode *inode); + + /* The filesystem's maximum ciphertext filename length, in bytes */ unsigned int max_namelen; + + /* + * Check whether the filesystem's inode numbers and UUID are stable, + * meaning that they will never be changed even by offline operations + * such as filesystem shrinking and therefore can be used in the + * encryption without the possibility of files becoming unreadable. + * + * Filesystems only need to implement this function if they want to + * support the FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags. These + * flags are designed to work around the limitations of UFS and eMMC + * inline crypto hardware, and they shouldn't be used in scenarios where + * such hardware isn't being used. + * + * Leaving this NULL is equivalent to always returning false. + */ bool (*has_stable_inodes)(struct super_block *sb); + + /* + * Get the number of bits that the filesystem uses to represent inode + * numbers and file logical block numbers. + * + * By default, both of these are assumed to be 64-bit. This function + * can be implemented to declare that either or both of these numbers is + * shorter, which may allow the use of the + * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of + * inline crypto hardware whose maximum DUN length is less than 64 bits + * (e.g., eMMC v5.2 spec compliant hardware). This function only needs + * to be implemented if support for one of these features is needed. + */ void (*get_ino_and_lblk_bits)(struct super_block *sb, int *ino_bits_ret, int *lblk_bits_ret); + + /* + * Return the number of block devices to which the filesystem may write + * encrypted file contents. + * + * If the filesystem can use multiple block devices (other than block + * devices that aren't used for encrypted file contents, such as + * external journal devices), and wants to support inline encryption, + * then it must implement this function. Otherwise it's not needed. + */ int (*get_num_devices)(struct super_block *sb); + + /* + * If ->get_num_devices() returns a value greater than 1, then this + * function is called to get the array of request_queues that the + * filesystem is using -- one per block device. 
(There may be duplicate + * entries in this array, as block devices can share a request_queue.) + */ void (*get_devices)(struct super_block *sb, struct request_queue **devs); }; @@ -253,6 +354,7 @@ int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, unsigned int max_size, struct delayed_call *done); +int fscrypt_symlink_getattr(const struct path *path, struct kstat *stat); static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { @@ -583,6 +685,12 @@ static inline const char *fscrypt_get_symlink(struct inode *inode, return ERR_PTR(-EOPNOTSUPP); } +static inline int fscrypt_symlink_getattr(const struct path *path, + struct kstat *stat) +{ + return -EOPNOTSUPP; +} + static inline void fscrypt_set_ops(struct super_block *sb, const struct fscrypt_operations *s_cop) { diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 63b56aba925a..30ece3ae6df7 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h @@ -423,7 +423,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); -struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev); +struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, + u16 if_id); extern struct bus_type fsl_mc_bus_type; diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index f8acddcf54fb..12d3a7d308ab 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -30,6 +30,9 @@ static inline void fsnotify_name(struct inode *dir, __u32 mask, struct inode *child, const struct qstr *name, u32 cookie) { + if (atomic_long_read(&dir->i_sb->s_fsnotify_connectors) == 0) + return; + fsnotify(mask, child, FSNOTIFY_EVENT_INODE, dir, name, NULL, cookie); } @@ -41,6 +44,9 @@ static inline void fsnotify_dirent(struct inode *dir, struct dentry *dentry, static inline void fsnotify_inode(struct inode *inode, __u32 mask) { + if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0) + return; + if (S_ISDIR(inode->i_mode)) mask |= FS_ISDIR; @@ -53,6 +59,9 @@ static inline int fsnotify_parent(struct dentry *dentry, __u32 mask, { struct inode *inode = d_inode(dentry); + if (atomic_long_read(&inode->i_sb->s_fsnotify_connectors) == 0) + return 0; + if (S_ISDIR(inode->i_mode)) { mask |= FS_ISDIR; diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index a69f363b61bf..832e65f06754 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -643,6 +643,22 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; } extern int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr); +/** + * ftrace_need_init_nop - return whether nop call sites should be initialized + * + * Normally the compiler's -mnop-mcount generates suitable nops, so we don't + * need to call ftrace_init_nop() if the code is built with that flag. + * Architectures where this is not always the case may define their own + * condition. 
+ * + * Return must be: + * 0 if ftrace_init_nop() should be called + * Nonzero if ftrace_init_nop() should not be called + */ + +#ifndef ftrace_need_init_nop +#define ftrace_need_init_nop() (!__is_defined(CC_USING_NOP_MCOUNT)) +#endif /** * ftrace_init_nop - initialize a nop call site diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index bc738504ab4a..c285968e437a 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -8,34 +8,11 @@ /* All generic netlink requests are serialized by a global lock. */ extern void genl_lock(void); extern void genl_unlock(void); -#ifdef CONFIG_LOCKDEP -extern bool lockdep_genl_is_held(void); -#endif /* for synchronisation between af_netlink and genetlink */ extern atomic_t genl_sk_destructing_cnt; extern wait_queue_head_t genl_sk_destructing_waitq; -/** - * rcu_dereference_genl - rcu_dereference with debug checking - * @p: The pointer to read, prior to dereferencing - * - * Do an rcu_dereference(p), but check caller either holds rcu_read_lock() - * or genl mutex. Note : Please prefer genl_dereference() or rcu_dereference() - */ -#define rcu_dereference_genl(p) \ - rcu_dereference_check(p, lockdep_genl_is_held()) - -/** - * genl_dereference - fetch RCU pointer when updates are prevented by genl mutex - * @p: The pointer to read, prior to dereferencing - * - * Return the value of the specified RCU-protected pointer, but omit - * the READ_ONCE(), because caller holds genl mutex. - */ -#define genl_dereference(p) \ - rcu_dereference_protected(p, lockdep_genl_is_held()) - #define MODULE_ALIAS_GENL_FAMILY(family)\ MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family) diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 13b34177cc85..c68d83c87f83 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h @@ -60,9 +60,6 @@ struct partition_meta_info { * device. * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl. * - * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up", - * with a similar meaning to network interfaces. - * * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include * partition information in ``/proc/partitions`` or in the output of * printk_all_partitions(). @@ -97,7 +94,6 @@ struct partition_meta_info { /* 2 is unused (used to be GENHD_FL_DRIVERFS) */ /* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */ #define GENHD_FL_CD 0x0008 -#define GENHD_FL_UP 0x0010 #define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020 #define GENHD_FL_EXT_DEVT 0x0040 #define GENHD_FL_NATIVE_CAPACITY 0x0080 @@ -153,13 +149,15 @@ struct gendisk { unsigned long state; #define GD_NEED_PART_SCAN 0 #define GD_READ_ONLY 1 -#define GD_QUEUE_REF 2 struct mutex open_mutex; /* open/close mutex */ unsigned open_partitions; /* number of open partitions */ + struct backing_dev_info *bdi; struct kobject *slave_dir; - +#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED + struct list_head slave_bdevs; +#endif struct timer_rand_state *random; atomic_t sync_io; /* RAID */ struct disk_events *ev; @@ -172,8 +170,14 @@ struct gendisk { int node_id; struct badblocks *bb; struct lockdep_map lockdep_map; + u64 diskseq; }; +static inline bool disk_live(struct gendisk *disk) +{ + return !inode_unhashed(disk->part0->bd_inode); +} + /* * The gendisk is refcounted by the part0 block_device, and the bd_device * therein is also used for device model presentation in sysfs. 
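Since the genhd.h hunk above removes GENHD_FL_UP and introduces the disk_live() helper, a driver that previously tested the flag would query the helper instead. The sketch below is illustrative only and not part of the patch; example_resize() and its notify-only-when-registered policy are hypothetical.

/*
 * Illustrative only: replace an old (disk->flags & GENHD_FL_UP) test with
 * the new disk_live() helper, which reports whether the gendisk has been
 * added to the system.
 */
#include <linux/genhd.h>

static void example_resize(struct gendisk *disk, sector_t new_sectors)
{
	if (disk_live(disk))
		/* Disk is registered: update capacity and emit a resize uevent. */
		set_capacity_and_notify(disk, new_sectors);
	else
		/* Not added yet: just record the starting capacity. */
		set_capacity(disk, new_sectors);
}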
@@ -210,18 +214,12 @@ static inline dev_t disk_devt(struct gendisk *disk) void disk_uevent(struct gendisk *disk, enum kobject_action action); /* block/genhd.c */ -extern void device_add_disk(struct device *parent, struct gendisk *disk, - const struct attribute_group **groups); -static inline void add_disk(struct gendisk *disk) +int device_add_disk(struct device *parent, struct gendisk *disk, + const struct attribute_group **groups); +static inline int add_disk(struct gendisk *disk) { - device_add_disk(NULL, disk, NULL); + return device_add_disk(NULL, disk, NULL); } -extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); -static inline void add_disk_no_queue_reg(struct gendisk *disk) -{ - device_add_disk_no_queue_reg(NULL, disk); -} - extern void del_gendisk(struct gendisk *gp); void set_disk_ro(struct gendisk *disk, bool read_only); @@ -236,6 +234,7 @@ extern void disk_block_events(struct gendisk *disk); extern void disk_unblock_events(struct gendisk *disk); extern void disk_flush_events(struct gendisk *disk, unsigned int mask); bool set_capacity_and_notify(struct gendisk *disk, sector_t size); +bool disk_force_media_change(struct gendisk *disk, unsigned int events); /* drivers/char/random.c */ extern void add_disk_randomness(struct gendisk *disk) __latent_entropy; @@ -259,26 +258,10 @@ static inline sector_t get_capacity(struct gendisk *disk) int bdev_disk_changed(struct gendisk *disk, bool invalidate); void blk_drop_partitions(struct gendisk *disk); -extern struct gendisk *__alloc_disk_node(int minors, int node_id); +struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id, + struct lock_class_key *lkclass); extern void put_disk(struct gendisk *disk); - -#define alloc_disk_node(minors, node_id) \ -({ \ - static struct lock_class_key __key; \ - const char *__name; \ - struct gendisk *__disk; \ - \ - __name = "(gendisk_completion)"#minors"("#node_id")"; \ - \ - __disk = __alloc_disk_node(minors, node_id); \ - \ - if (__disk) \ - lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \ - \ - __disk; \ -}) - -#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE) +struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass); /** * blk_alloc_disk - allocate a gendisk structure @@ -291,15 +274,10 @@ extern void put_disk(struct gendisk *disk); */ #define blk_alloc_disk(node_id) \ ({ \ - struct gendisk *__disk = __blk_alloc_disk(node_id); \ static struct lock_class_key __key; \ \ - if (__disk) \ - lockdep_init_map(&__disk->lockdep_map, \ - "(bio completion)", &__key, 0); \ - __disk; \ + __blk_alloc_disk(node_id, &__key); \ }) -struct gendisk *__blk_alloc_disk(int node); void blk_cleanup_disk(struct gendisk *disk); int __register_blkdev(unsigned int major, const char *name, @@ -316,9 +294,10 @@ void set_capacity(struct gendisk *disk, sector_t size); int blkdev_ioctl(struct block_device *, fmode_t, unsigned, unsigned long); long compat_blkdev_ioctl(struct file *, unsigned, unsigned long); -#ifdef CONFIG_SYSFS +#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk); void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk); +int bd_register_pending_holders(struct gendisk *disk); #else static inline int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) @@ -329,9 +308,14 @@ static inline void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) { } -#endif /* CONFIG_SYSFS */ +static inline int 
bd_register_pending_holders(struct gendisk *disk) +{ + return 0; +} +#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */ dev_t part_devt(struct gendisk *disk, u8 partno); +void inc_diskseq(struct gendisk *disk); dev_t blk_lookup_devt(const char *name, int partno); void blk_request_module(dev_t devt); #ifdef CONFIG_BLOCK diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 566feb56601f..97a28ad3393b 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h @@ -609,7 +609,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev, #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO) struct device_node; -struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, +struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label); @@ -619,7 +619,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, struct device_node; static inline -struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, +struct gpio_desc *gpiod_get_from_of_node(const struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label) @@ -633,7 +633,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node, struct device_node; struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - struct device_node *node, + const struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label); @@ -644,7 +644,7 @@ struct device_node; static inline struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, - struct device_node *node, + const struct device_node *node, const char *propname, int index, enum gpiod_flags dflags, const char *label) @@ -680,10 +680,10 @@ struct acpi_gpio_mapping { unsigned int quirks; }; -#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI) - struct acpi_device; +#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_ACPI) + int acpi_dev_add_driver_gpios(struct acpi_device *adev, const struct acpi_gpio_mapping *gpios); void acpi_dev_remove_driver_gpios(struct acpi_device *adev); @@ -696,8 +696,6 @@ struct gpio_desc *acpi_get_and_request_gpiod(char *path, int pin, char *label); #else /* CONFIG_GPIOLIB && CONFIG_ACPI */ -struct acpi_device; - static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev, const struct acpi_gpio_mapping *gpios) { diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 3a268781fcec..a0f9901dcae6 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -312,6 +312,9 @@ struct gpio_irq_chip { * get rid of the static GPIO number space in the long run. * @ngpio: the number of GPIOs handled by this controller; the last GPIO * handled is (base + ngpio - 1). + * @offset: when multiple gpio chips belong to the same device this + * can be used as offset within the device so friendly names can + * be properly assigned. * @names: if set, must be an array of strings to use as alternative * names for the GPIOs in this chip. 
Any entry in the array * may be NULL if there is no alias for the GPIO, however the @@ -398,6 +401,7 @@ struct gpio_chip { int base; u16 ngpio; + u16 offset; const char *const *names; bool can_sleep; diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 69bc86ea382c..76878b357ffa 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h @@ -116,7 +116,6 @@ extern void rcu_nmi_exit(void); do { \ lockdep_off(); \ arch_nmi_enter(); \ - printk_nmi_enter(); \ BUG_ON(in_nmi() == NMI_MASK); \ __preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \ } while (0) @@ -135,7 +134,6 @@ extern void rcu_nmi_exit(void); do { \ BUG_ON(!in_nmi()); \ __preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \ - printk_nmi_exit(); \ arch_nmi_exit(); \ lockdep_on(); \ } while (0) diff --git a/include/linux/hdlc.h b/include/linux/hdlc.h index cacc4dd27794..630a388035f1 100644 --- a/include/linux/hdlc.h +++ b/include/linux/hdlc.h @@ -22,7 +22,7 @@ struct hdlc_proto { void (*start)(struct net_device *dev); /* if open & DCD */ void (*stop)(struct net_device *dev); /* if open & !DCD */ void (*detach)(struct net_device *dev); - int (*ioctl)(struct net_device *dev, struct ifreq *ifr); + int (*ioctl)(struct net_device *dev, struct if_settings *ifs); __be16 (*type_trans)(struct sk_buff *skb, struct net_device *dev); int (*netif_rx)(struct sk_buff *skb); netdev_tx_t (*xmit)(struct sk_buff *skb, struct net_device *dev); @@ -54,7 +54,7 @@ typedef struct hdlc_device { /* Exported from hdlc module */ /* Called by hardware driver when a user requests HDLC service */ -int hdlc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); +int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs); /* Must be used by hardware driver on module startup/exit */ #define register_hdlc_device(dev) register_netdev(dev) diff --git a/include/linux/hdlcdrv.h b/include/linux/hdlcdrv.h index d4d633a49d36..5d70c3f98f5b 100644 --- a/include/linux/hdlcdrv.h +++ b/include/linux/hdlcdrv.h @@ -79,7 +79,7 @@ struct hdlcdrv_ops { */ int (*open)(struct net_device *); int (*close)(struct net_device *); - int (*ioctl)(struct net_device *, struct ifreq *, + int (*ioctl)(struct net_device *, void __user *, struct hdlcdrv_ioctl *, int); }; diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index 7902c7d8b55f..4aa1031d3e4c 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -90,7 +90,11 @@ static inline void __kunmap_local(void *vaddr) static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_disable(); + else + preempt_disable(); + pagefault_disable(); return __kmap_local_page_prot(page, prot); } @@ -102,7 +106,11 @@ static inline void *kmap_atomic(struct page *page) static inline void *kmap_atomic_pfn(unsigned long pfn) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_disable(); + else + preempt_disable(); + pagefault_disable(); return __kmap_local_pfn_prot(pfn, kmap_prot); } @@ -111,7 +119,10 @@ static inline void __kunmap_atomic(void *addr) { kunmap_local_indexed(addr); pagefault_enable(); - preempt_enable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_enable(); + else + preempt_enable(); } unsigned int __nr_free_highpages(void); @@ -179,7 +190,10 @@ static inline void __kunmap_local(void *addr) static inline void *kmap_atomic(struct page *page) { - preempt_disable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_disable(); + else + preempt_disable(); 
pagefault_disable(); return page_address(page); } @@ -200,7 +214,10 @@ static inline void __kunmap_atomic(void *addr) kunmap_flush_on_unmap(addr); #endif pagefault_enable(); - preempt_enable(); + if (IS_ENABLED(CONFIG_PREEMPT_RT)) + migrate_enable(); + else + preempt_enable(); } static inline unsigned int nr_free_highpages(void) { return 0; } diff --git a/include/linux/highmem.h b/include/linux/highmem.h index d9a606a9fc64..b4c49f9cc379 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -130,10 +130,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page } #endif -#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE -static inline void flush_kernel_dcache_page(struct page *page) -{ -} +#ifndef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE static inline void flush_kernel_vmap_range(void *vaddr, int size) { } diff --git a/include/linux/host1x.h b/include/linux/host1x.h index 9b0487c88571..7bccf589aba7 100644 --- a/include/linux/host1x.h +++ b/include/linux/host1x.h @@ -170,6 +170,8 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base); void host1x_syncpt_release_vblank_reservation(struct host1x_client *client, u32 syncpt_id); +struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold); + /* * host1x channel */ @@ -216,8 +218,8 @@ struct host1x_job { struct host1x_client *client; /* Gathers and their memory */ - struct host1x_job_gather *gathers; - unsigned int num_gathers; + struct host1x_job_cmd *cmds; + unsigned int num_cmds; /* Array of handles to be pinned & unpinned */ struct host1x_reloc *relocs; @@ -234,9 +236,15 @@ struct host1x_job { u32 syncpt_incrs; u32 syncpt_end; + /* Completion waiter ref */ + void *waiter; + /* Maximum time to wait for this job */ unsigned int timeout; + /* Job has timed out and should be released */ + bool cancelled; + /* Index and number of slots used in the push buffer */ unsigned int first_get; unsigned int num_slots; @@ -257,12 +265,25 @@ struct host1x_job { /* Add a channel wait for previous ops to complete */ bool serialize; + + /* Fast-forward syncpoint increments on job timeout */ + bool syncpt_recovery; + + /* Callback called when job is freed */ + void (*release)(struct host1x_job *job); + void *user_data; + + /* Whether host1x-side firewall should be ran for this job or not */ + bool enable_firewall; }; struct host1x_job *host1x_job_alloc(struct host1x_channel *ch, - u32 num_cmdbufs, u32 num_relocs); + u32 num_cmdbufs, u32 num_relocs, + bool skip_firewall); void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, unsigned int words, unsigned int offset); +void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh, + bool relative, u32 next_class); struct host1x_job *host1x_job_get(struct host1x_job *job); void host1x_job_put(struct host1x_job *job); int host1x_job_pin(struct host1x_job *job, struct device *dev); diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index bb5e7b0a4274..0ee140176f10 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -318,16 +318,12 @@ struct clock_event_device; extern void hrtimer_interrupt(struct clock_event_device *dev); -extern void clock_was_set_delayed(void); - extern unsigned int hrtimer_resolution; #else #define hrtimer_resolution (unsigned int)LOW_RES_NSEC -static inline void clock_was_set_delayed(void) { } - #endif static inline ktime_t @@ -351,13 +347,13 @@ hrtimer_expires_remaining_adjusted(const struct hrtimer *timer) timer->base->get_time()); } -extern void clock_was_set(void); #ifdef 
CONFIG_TIMERFD extern void timerfd_clock_was_set(void); +extern void timerfd_resume(void); #else static inline void timerfd_clock_was_set(void) { } +static inline void timerfd_resume(void) { } #endif -extern void hrtimers_resume(void); DECLARE_PER_CPU(struct tick_device, tick_cpu_device); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index f7ca1a3870ea..1faebe1cd0ed 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -858,6 +858,11 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); +static inline void hugetlb_count_init(struct mm_struct *mm) +{ + atomic_long_set(&mm->hugetlb_usage, 0); +} + static inline void hugetlb_count_add(long l, struct mm_struct *mm) { atomic_long_add(l, &mm->hugetlb_usage); @@ -1042,6 +1047,10 @@ static inline spinlock_t *huge_pte_lockptr(struct hstate *h, return &mm->page_table_lock; } +static inline void hugetlb_count_init(struct mm_struct *mm) +{ +} + static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) { } diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 0b8d1fdda3a1..c137396129db 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -121,6 +121,13 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg) css_put(&h_cg->css); } +static inline void resv_map_dup_hugetlb_cgroup_uncharge_info( + struct resv_map *resv_map) +{ + if (resv_map->css) + css_get(resv_map->css); +} + extern int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr); extern int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages, @@ -199,6 +206,11 @@ static inline void hugetlb_cgroup_put_rsvd_cgroup(struct hugetlb_cgroup *h_cg) { } +static inline void resv_map_dup_hugetlb_cgroup_uncharge_info( + struct resv_map *resv_map) +{ +} + static inline int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr) { diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 2e859d2f9609..ddc8713ce57b 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -538,12 +538,6 @@ struct vmbus_channel_rescind_offer { u32 child_relid; } __packed; -static inline u32 -hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi) -{ - return rbi->ring_buffer->pending_send_sz; -} - /* * Request Offer -- no parameters, SynIC message contains the partition ID * Set Snoop -- no parameters, SynIC message contains the partition ID @@ -1092,16 +1086,6 @@ static inline void set_channel_pending_send_size(struct vmbus_channel *c, c->outbound.ring_buffer->pending_send_sz = size; } -static inline void set_low_latency_mode(struct vmbus_channel *c) -{ - c->low_latency = true; -} - -static inline void clear_low_latency_mode(struct vmbus_channel *c) -{ - c->low_latency = false; -} - void vmbus_onmessage(struct vmbus_channel_message_header *hdr); int vmbus_request_offers(void); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 3eb60a2e9e61..2ce3efbe9198 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -1010,6 +1010,7 @@ struct acpi_resource_i2c_serialbus; #if IS_ENABLED(CONFIG_ACPI) bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, struct acpi_resource_i2c_serialbus **i2c); +int i2c_acpi_client_count(struct acpi_device *adev); u32 i2c_acpi_find_bus_speed(struct device *dev); struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, struct 
i2c_board_info *info); @@ -1020,6 +1021,10 @@ static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, { return false; } +static inline int i2c_acpi_client_count(struct acpi_device *adev) +{ + return 0; +} static inline u32 i2c_acpi_find_bus_speed(struct device *dev) { return 0; diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index a6730072d13a..694264503119 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1088,6 +1088,48 @@ struct ieee80211_ext { } u; } __packed __aligned(2); +#define IEEE80211_TWT_CONTROL_NDP BIT(0) +#define IEEE80211_TWT_CONTROL_RESP_MODE BIT(1) +#define IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST BIT(3) +#define IEEE80211_TWT_CONTROL_RX_DISABLED BIT(4) +#define IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT BIT(5) + +#define IEEE80211_TWT_REQTYPE_REQUEST BIT(0) +#define IEEE80211_TWT_REQTYPE_SETUP_CMD GENMASK(3, 1) +#define IEEE80211_TWT_REQTYPE_TRIGGER BIT(4) +#define IEEE80211_TWT_REQTYPE_IMPLICIT BIT(5) +#define IEEE80211_TWT_REQTYPE_FLOWTYPE BIT(6) +#define IEEE80211_TWT_REQTYPE_FLOWID GENMASK(9, 7) +#define IEEE80211_TWT_REQTYPE_WAKE_INT_EXP GENMASK(14, 10) +#define IEEE80211_TWT_REQTYPE_PROTECTION BIT(15) + +enum ieee80211_twt_setup_cmd { + TWT_SETUP_CMD_REQUEST, + TWT_SETUP_CMD_SUGGEST, + TWT_SETUP_CMD_DEMAND, + TWT_SETUP_CMD_GROUPING, + TWT_SETUP_CMD_ACCEPT, + TWT_SETUP_CMD_ALTERNATE, + TWT_SETUP_CMD_DICTATE, + TWT_SETUP_CMD_REJECT, +}; + +struct ieee80211_twt_params { + __le16 req_type; + __le64 twt; + u8 min_twt_dur; + __le16 mantissa; + u8 channel; +} __packed; + +struct ieee80211_twt_setup { + u8 dialog_token; + u8 element_id; + u8 length; + u8 control; + u8 params[]; +} __packed; + struct ieee80211_mgmt { __le16 frame_control; __le16 duration; @@ -1252,6 +1294,10 @@ struct ieee80211_mgmt { __le16 toa_error; u8 variable[0]; } __packed ftm; + struct { + u8 action_code; + u8 variable[]; + } __packed s1g; } u; } __packed action; } u; @@ -2266,6 +2312,9 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) #define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x40000000 #define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x80000000 +#define IEEE80211_6GHZ_CTRL_REG_LPI_AP 0 +#define IEEE80211_6GHZ_CTRL_REG_SP_AP 1 + /** * ieee80211_he_6ghz_oper - HE 6 GHz operation Information field * @primary: primary channel @@ -2282,6 +2331,7 @@ struct ieee80211_he_6ghz_oper { #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_80MHZ 2 #define IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ 3 #define IEEE80211_HE_6GHZ_OPER_CTRL_DUP_BEACON 0x4 +#define IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO 0x38 u8 control; u8 ccfs0; u8 ccfs1; @@ -2289,6 +2339,44 @@ struct ieee80211_he_6ghz_oper { } __packed; /* + * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021", + * it show four types in "Table 9-275a-Maximum Transmit Power Interpretation + * subfield encoding", and two category for each type in "Table E-12-Regulatory + * Info subfield encoding in the United States". + * So it it totally max 8 Transmit Power Envelope element. + */ +#define IEEE80211_TPE_MAX_IE_COUNT 8 +/* + * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield" + * of "IEEE Std 802.11ax™‐2021", the max power level is 8. 
+ */ +#define IEEE80211_MAX_NUM_PWR_LEVEL 8 + +#define IEEE80211_TPE_MAX_POWER_COUNT 8 + +/* transmit power interpretation type of transmit power envelope element */ +enum ieee80211_tx_power_intrpt_type { + IEEE80211_TPE_LOCAL_EIRP, + IEEE80211_TPE_LOCAL_EIRP_PSD, + IEEE80211_TPE_REG_CLIENT_EIRP, + IEEE80211_TPE_REG_CLIENT_EIRP_PSD, +}; + +/** + * struct ieee80211_tx_pwr_env + * + * This structure represents the "Transmit Power Envelope element" + */ +struct ieee80211_tx_pwr_env { + u8 tx_power_info; + s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT]; +} __packed; + +#define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7 +#define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38 +#define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0 + +/* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the He Operations IE, stating from the byte * after the ext ID byte. It is assumed that he_oper_ie has at least @@ -2869,7 +2957,7 @@ enum ieee80211_eid { WLAN_EID_VHT_OPERATION = 192, WLAN_EID_EXTENDED_BSS_LOAD = 193, WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194, - WLAN_EID_VHT_TX_POWER_ENVELOPE = 195, + WLAN_EID_TX_POWER_ENVELOPE = 195, WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196, WLAN_EID_AID = 197, WLAN_EID_QUIET_CHANNEL = 198, @@ -2881,6 +2969,7 @@ enum ieee80211_eid { WLAN_EID_AID_RESPONSE = 211, WLAN_EID_S1G_BCN_COMPAT = 213, WLAN_EID_S1G_SHORT_BCN_INTERVAL = 214, + WLAN_EID_S1G_TWT = 216, WLAN_EID_S1G_CAPABILITIES = 217, WLAN_EID_VENDOR_SPECIFIC = 221, WLAN_EID_QOS_PARAMETER = 222, @@ -2950,6 +3039,7 @@ enum ieee80211_category { WLAN_CATEGORY_FST = 18, WLAN_CATEGORY_UNPROT_DMG = 20, WLAN_CATEGORY_VHT = 21, + WLAN_CATEGORY_S1G = 22, WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, WLAN_CATEGORY_VENDOR_SPECIFIC = 127, }; @@ -3023,6 +3113,20 @@ enum ieee80211_key_len { WLAN_KEY_LEN_BIP_GMAC_256 = 32, }; +enum ieee80211_s1g_actioncode { + WLAN_S1G_AID_SWITCH_REQUEST, + WLAN_S1G_AID_SWITCH_RESPONSE, + WLAN_S1G_SYNC_CONTROL, + WLAN_S1G_STA_INFO_ANNOUNCE, + WLAN_S1G_EDCA_PARAM_SET, + WLAN_S1G_EL_OPERATION, + WLAN_S1G_TWT_SETUP, + WLAN_S1G_TWT_TEARDOWN, + WLAN_S1G_SECT_GROUP_ID_LIST, + WLAN_S1G_SECT_ID_FEEDBACK, + WLAN_S1G_TWT_INFORMATION = 11, +}; + #define IEEE80211_WEP_IV_LEN 4 #define IEEE80211_WEP_ICV_LEN 4 #define IEEE80211_CCMP_HDR_LEN 8 diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index b651c5e32a28..509e18c7e740 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -57,10 +57,16 @@ struct br_ip_list { #define BR_MRP_AWARE BIT(17) #define BR_MRP_LOST_CONT BIT(18) #define BR_MRP_LOST_IN_CONT BIT(19) +#define BR_TX_FWD_OFFLOAD BIT(20) #define BR_DEFAULT_AGEING_TIME (300 * HZ) -extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *)); +struct net_bridge; +void brioctl_set(int (*hook)(struct net *net, struct net_bridge *br, + unsigned int cmd, struct ifreq *ifr, + void __user *uarg)); +int br_ioctl_call(struct net *net, struct net_bridge *br, unsigned int cmd, + struct ifreq *ifr, void __user *uarg); #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, @@ -70,9 +76,6 @@ bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); bool br_multicast_has_router_adjacent(struct net_device *dev, int proto); bool br_multicast_enabled(const struct net_device *dev); bool br_multicast_router(const struct net_device *dev); -int br_mdb_replay(struct net_device *br_dev, struct net_device *dev, - const void *ctx, bool adding, struct notifier_block *nb, - 
struct netlink_ext_ack *extack); #else static inline int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list) @@ -104,13 +107,6 @@ static inline bool br_multicast_router(const struct net_device *dev) { return false; } -static inline int br_mdb_replay(const struct net_device *br_dev, - const struct net_device *dev, const void *ctx, - bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack) -{ - return -EOPNOTSUPP; -} #endif #if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_VLAN_FILTERING) @@ -120,9 +116,8 @@ int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid); int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto); int br_vlan_get_info(const struct net_device *dev, u16 vid, struct bridge_vlan_info *p_vinfo); -int br_vlan_replay(struct net_device *br_dev, struct net_device *dev, - const void *ctx, bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack); +int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, + struct bridge_vlan_info *p_vinfo); #else static inline bool br_vlan_enabled(const struct net_device *dev) { @@ -150,12 +145,10 @@ static inline int br_vlan_get_info(const struct net_device *dev, u16 vid, return -EINVAL; } -static inline int br_vlan_replay(struct net_device *br_dev, - struct net_device *dev, const void *ctx, - bool adding, struct notifier_block *nb, - struct netlink_ext_ack *extack) +static inline int br_vlan_get_info_rcu(const struct net_device *dev, u16 vid, + struct bridge_vlan_info *p_vinfo) { - return -EOPNOTSUPP; + return -EINVAL; } #endif @@ -167,8 +160,6 @@ void br_fdb_clear_offload(const struct net_device *dev, u16 vid); bool br_port_flag_is_set(const struct net_device *dev, unsigned long flag); u8 br_port_get_stp_state(const struct net_device *dev); clock_t br_get_ageing_time(const struct net_device *br_dev); -int br_fdb_replay(const struct net_device *br_dev, const struct net_device *dev, - const void *ctx, bool adding, struct notifier_block *nb); #else static inline struct net_device * br_fdb_find_port(const struct net_device *br_dev, @@ -197,13 +188,6 @@ static inline clock_t br_get_ageing_time(const struct net_device *br_dev) { return 0; } - -static inline int br_fdb_replay(const struct net_device *br_dev, - const struct net_device *dev, const void *ctx, - bool adding, struct notifier_block *nb) -{ - return -EOPNOTSUPP; -} #endif #endif diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 64ce8cd1cfaf..93c262ecbdc9 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h @@ -41,9 +41,6 @@ struct ip_sf_socklist { __be32 sl_addr[]; }; -#define IP_SFLSIZE(count) (sizeof(struct ip_sf_socklist) + \ - (count) * sizeof(__be32)) - #define IP_SFBLOCK 10 /* allocate this many at once */ /* ip_mc_socklist is real list now. 
Speed is not argument; diff --git a/include/linux/ima.h b/include/linux/ima.h index 61d5723ec303..b6ab66a546ae 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -11,9 +11,11 @@ #include <linux/fs.h> #include <linux/security.h> #include <linux/kexec.h> +#include <crypto/hash_info.h> struct linux_binprm; #ifdef CONFIG_IMA +extern enum hash_algo ima_get_current_hash_algo(void); extern int ima_bprm_check(struct linux_binprm *bprm); extern int ima_file_check(struct file *file, int mask); extern void ima_post_create_tmpfile(struct user_namespace *mnt_userns, @@ -33,10 +35,10 @@ extern void ima_post_path_mknod(struct user_namespace *mnt_userns, extern int ima_file_hash(struct file *file, char *buf, size_t buf_size); extern int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size); extern void ima_kexec_cmdline(int kernel_fd, const void *buf, int size); -extern void ima_measure_critical_data(const char *event_label, - const char *event_name, - const void *buf, size_t buf_len, - bool hash); +extern int ima_measure_critical_data(const char *event_label, + const char *event_name, + const void *buf, size_t buf_len, + bool hash, u8 *digest, size_t digest_len); #ifdef CONFIG_IMA_APPRAISE_BOOTPARAM extern void ima_appraise_parse_cmdline(void); @@ -64,6 +66,11 @@ static inline const char * const *arch_get_ima_policy(void) #endif #else +static inline enum hash_algo ima_get_current_hash_algo(void) +{ + return HASH_ALGO__LAST; +} + static inline int ima_bprm_check(struct linux_binprm *bprm) { return 0; @@ -137,10 +144,14 @@ static inline int ima_inode_hash(struct inode *inode, char *buf, size_t buf_size static inline void ima_kexec_cmdline(int kernel_fd, const void *buf, int size) {} -static inline void ima_measure_critical_data(const char *event_label, +static inline int ima_measure_critical_data(const char *event_label, const char *event_name, const void *buf, size_t buf_len, - bool hash) {} + bool hash, u8 *digest, + size_t digest_len) +{ + return -ENOENT; +} #endif /* CONFIG_IMA */ diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 53aa0343bf69..a038feb63f23 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h @@ -41,7 +41,7 @@ struct in_device { unsigned long mr_qri; /* Query Response Interval */ unsigned char mr_qrv; /* Query Robustness Variable */ unsigned char mr_gq_running; - unsigned char mr_ifc_count; + u32 mr_ifc_count; struct timer_list mr_gq_timer; /* general query timer */ struct timer_list mr_ifc_timer; /* interface change timer */ @@ -178,6 +178,15 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); +#ifdef CONFIG_INET +int inet_gifconf(struct net_device *dev, char __user *buf, int len, int size); +#else +static inline int inet_gifconf(struct net_device *dev, char __user *buf, + int len, int size) +{ + return 0; +} +#endif void devinet_init(void); struct in_device *inetdev_by_index(struct net *, int); __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index d0fa0b31994d..05a65eb155f7 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -124,9 +124,9 @@ #define DMAR_MTRR_PHYSMASK8_REG 0x208 #define DMAR_MTRR_PHYSBASE9_REG 0x210 #define DMAR_MTRR_PHYSMASK9_REG 0x218 -#define DMAR_VCCAP_REG 0xe00 /* Virtual command capability register */ -#define 
DMAR_VCMD_REG 0xe10 /* Virtual command register */ -#define DMAR_VCRSP_REG 0xe20 /* Virtual command response register */ +#define DMAR_VCCAP_REG 0xe30 /* Virtual command capability register */ +#define DMAR_VCMD_REG 0xe00 /* Virtual command register */ +#define DMAR_VCRSP_REG 0xe10 /* Virtual command response register */ #define DMAR_IQER_REG_IQEI(reg) FIELD_GET(GENMASK_ULL(3, 0), reg) #define DMAR_IQER_REG_ITESID(reg) FIELD_GET(GENMASK_ULL(47, 32), reg) diff --git a/include/linux/intel-ish-client-if.h b/include/linux/intel-ish-client-if.h index 25e2b4e80502..aee8ff4739b1 100644 --- a/include/linux/intel-ish-client-if.h +++ b/include/linux/intel-ish-client-if.h @@ -81,6 +81,8 @@ int ishtp_register_event_cb(struct ishtp_cl_device *device, /* Get the device * from ishtp device instance */ struct device *ishtp_device(struct ishtp_cl_device *cl_device); +/* wait for IPC resume */ +bool ishtp_wait_resume(struct ishtp_device *dev); /* Trace interface for clients */ ishtp_print_log ishtp_trace_callback(struct ishtp_cl_device *cl_device); /* Get device pointer of PCI device for DMA acces */ diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 10fa80eef13a..57cceecbe37f 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h @@ -14,6 +14,11 @@ #define SVM_REQ_EXEC (1<<1) #define SVM_REQ_PRIV (1<<0) +/* Page Request Queue depth */ +#define PRQ_ORDER 2 +#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) +#define PRQ_DEPTH ((0x1000 << PRQ_ORDER) >> 5) + /* * The SVM_FLAG_SUPERVISOR_MODE flag requests a PASID which can be used only * for access to kernel addresses. No IOTLB flushes are automatically done diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 2ed65b01c961..1f22a30c0963 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -13,6 +13,7 @@ #include <linux/hrtimer.h> #include <linux/kref.h> #include <linux/workqueue.h> +#include <linux/jump_label.h> #include <linux/atomic.h> #include <asm/ptrace.h> @@ -474,12 +475,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, #ifdef CONFIG_IRQ_FORCED_THREADING # ifdef CONFIG_PREEMPT_RT -# define force_irqthreads (true) +# define force_irqthreads() (true) # else -extern bool force_irqthreads; +DECLARE_STATIC_KEY_FALSE(force_irqthreads_key); +# define force_irqthreads() (static_branch_unlikely(&force_irqthreads_key)) # endif #else -#define force_irqthreads (0) +#define force_irqthreads() (false) #endif #ifndef local_softirq_pending diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index 4d40dfa75b55..86af6f0a00a2 100644 --- a/include/linux/io-pgtable.h +++ b/include/linux/io-pgtable.h @@ -16,6 +16,7 @@ enum io_pgtable_fmt { ARM_V7S, ARM_MALI_LPAE, AMD_IOMMU_V1, + APPLE_DART, IO_PGTABLE_NUM_FMTS, }; @@ -73,10 +74,6 @@ struct io_pgtable_cfg { * to support up to 35 bits PA where the bit32, bit33 and bit34 are * encoded in the bit9, bit4 and bit5 of the PTE respectively. * - * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs - * on unmap, for DMA domains using the flush queue mechanism for - * delayed invalidation. - * * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table * for use in the upper half of a split address space. 
* @@ -86,7 +83,6 @@ struct io_pgtable_cfg { #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3) - #define IO_PGTABLE_QUIRK_NON_STRICT BIT(4) #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) unsigned long quirks; @@ -136,6 +132,11 @@ struct io_pgtable_cfg { u64 transtab; u64 memattr; } arm_mali_lpae_cfg; + + struct { + u64 ttbr[4]; + u32 n_ttbrs; + } apple_dart_cfg; }; }; @@ -143,7 +144,9 @@ struct io_pgtable_cfg { * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. * * @map: Map a physically contiguous memory region. + * @map_pages: Map a physically contiguous range of pages of the same size. * @unmap: Unmap a physically contiguous memory region. + * @unmap_pages: Unmap a range of virtually contiguous pages of the same size. * @iova_to_phys: Translate iova to physical address. * * These functions map directly onto the iommu_ops member functions with @@ -152,8 +155,14 @@ struct io_pgtable_cfg { struct io_pgtable_ops { int (*map)(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped); size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, size_t size, struct iommu_iotlb_gather *gather); + size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *gather); phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, unsigned long iova); }; @@ -246,5 +255,6 @@ extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns; extern struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns; extern struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns; +extern struct io_pgtable_init_fns io_pgtable_apple_dart_init_fns; #endif /* __IO_PGTABLE_H */ diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h index 04b650bcbbe5..649a4d7c241b 100644 --- a/include/linux/io_uring.h +++ b/include/linux/io_uring.h @@ -7,17 +7,18 @@ #if defined(CONFIG_IO_URING) struct sock *io_uring_get_socket(struct file *file); -void __io_uring_cancel(struct files_struct *files); +void __io_uring_cancel(bool cancel_all); void __io_uring_free(struct task_struct *tsk); -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_files_cancel(void) { if (current->io_uring) - __io_uring_cancel(files); + __io_uring_cancel(false); } static inline void io_uring_task_cancel(void) { - return io_uring_files_cancel(NULL); + if (current->io_uring) + __io_uring_cancel(true); } static inline void io_uring_free(struct task_struct *tsk) { @@ -32,7 +33,7 @@ static inline struct sock *io_uring_get_socket(struct file *file) static inline void io_uring_task_cancel(void) { } -static inline void io_uring_files_cancel(struct files_struct *files) +static inline void io_uring_files_cancel(void) { } static inline void io_uring_free(struct task_struct *tsk) diff --git a/include/linux/ioam6.h b/include/linux/ioam6.h new file mode 100644 index 000000000000..94a24b36998f --- /dev/null +++ b/include/linux/ioam6.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ +#ifndef _LINUX_IOAM6_H +#define _LINUX_IOAM6_H + +#include 
<uapi/linux/ioam6.h> + +#endif /* _LINUX_IOAM6_H */ diff --git a/include/linux/ioam6_genl.h b/include/linux/ioam6_genl.h new file mode 100644 index 000000000000..176e67919de3 --- /dev/null +++ b/include/linux/ioam6_genl.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM Generic Netlink API + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ +#ifndef _LINUX_IOAM6_GENL_H +#define _LINUX_IOAM6_GENL_H + +#include <uapi/linux/ioam6_genl.h> + +#endif /* _LINUX_IOAM6_GENL_H */ diff --git a/include/linux/ioam6_iptunnel.h b/include/linux/ioam6_iptunnel.h new file mode 100644 index 000000000000..07d9dfedd29d --- /dev/null +++ b/include/linux/ioam6_iptunnel.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * IPv6 IOAM Lightweight Tunnel API + * + * Author: + * Justin Iurman <justin.iurman@uliege.be> + */ +#ifndef _LINUX_IOAM6_IPTUNNEL_H +#define _LINUX_IOAM6_IPTUNNEL_H + +#include <uapi/linux/ioam6_iptunnel.h> + +#endif /* _LINUX_IOAM6_IPTUNNEL_H */ diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 479c1da3e221..24f8489583ca 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -91,13 +91,30 @@ struct iomap { const struct iomap_page_ops *page_ops; }; -static inline sector_t -iomap_sector(struct iomap *iomap, loff_t pos) +static inline sector_t iomap_sector(const struct iomap *iomap, loff_t pos) { return (iomap->addr + pos - iomap->offset) >> SECTOR_SHIFT; } /* + * Returns the inline data pointer for logical offset @pos. + */ +static inline void *iomap_inline_data(const struct iomap *iomap, loff_t pos) +{ + return iomap->inline_data + pos - iomap->offset; +} + +/* + * Check if the mapping's length is within the valid range for inline data. + * This is used to guard against accessing data beyond the page inline_data + * points at. + */ +static inline bool iomap_inline_data_valid(const struct iomap *iomap) +{ + return iomap->length <= PAGE_SIZE - offset_in_page(iomap->inline_data); +} + +/* * When a filesystem sets page_ops in an iomap mapping it returns, page_prepare * and page_done will be called for each page written to. This only applies to * buffered writes as unbuffered writes will not typically have pages @@ -108,10 +125,9 @@ iomap_sector(struct iomap *iomap, loff_t pos) * associated page could not be obtained. */ struct iomap_page_ops { - int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len, - struct iomap *iomap); + int (*page_prepare)(struct inode *inode, loff_t pos, unsigned len); void (*page_done)(struct inode *inode, loff_t pos, unsigned copied, - struct page *page, struct iomap *iomap); + struct page *page); }; /* @@ -124,6 +140,7 @@ struct iomap_page_ops { #define IOMAP_DIRECT (1 << 4) /* direct I/O */ #define IOMAP_NOWAIT (1 << 5) /* do not block */ #define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */ +#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */ struct iomap_ops { /* @@ -145,15 +162,61 @@ struct iomap_ops { ssize_t written, unsigned flags, struct iomap *iomap); }; -/* - * Main iomap iterator function. +/** + * struct iomap_iter - Iterate through a range of a file + * @inode: Set at the start of the iteration and should not change. + * @pos: The current file position we are operating on. It is updated by + * calls to iomap_iter(). Treat as read-only in the body. + * @len: The remaining length of the file segment we're operating on. + * It is updated at the same time as @pos. 
+ * @processed: The number of bytes processed by the body in the most recent + * iteration, or a negative errno. 0 causes the iteration to stop. + * @flags: Zero or more of the iomap_begin flags above. + * @iomap: Map describing the I/O iteration + * @srcmap: Source map for COW operations */ -typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len, - void *data, struct iomap *iomap, struct iomap *srcmap); +struct iomap_iter { + struct inode *inode; + loff_t pos; + u64 len; + s64 processed; + unsigned flags; + struct iomap iomap; + struct iomap srcmap; +}; -loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length, - unsigned flags, const struct iomap_ops *ops, void *data, - iomap_actor_t actor); +int iomap_iter(struct iomap_iter *iter, const struct iomap_ops *ops); + +/** + * iomap_length - length of the current iomap iteration + * @iter: iteration structure + * + * Returns the length that the operation applies to for the current iteration. + */ +static inline u64 iomap_length(const struct iomap_iter *iter) +{ + u64 end = iter->iomap.offset + iter->iomap.length; + + if (iter->srcmap.type != IOMAP_HOLE) + end = min(end, iter->srcmap.offset + iter->srcmap.length); + return min(iter->len, end - iter->pos); +} + +/** + * iomap_iter_srcmap - return the source map for the current iomap iteration + * @i: iteration structure + * + * Write operations on file systems with reflink support might require a + * source and a destination map. This function retourns the source map + * for a given operation, which may or may no be identical to the destination + * map in &i->iomap. + */ +static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i) +{ + if (i->srcmap.type != IOMAP_HOLE) + return &i->srcmap; + return &i->iomap; +} ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, const struct iomap_ops *ops); @@ -250,8 +313,8 @@ int iomap_writepages(struct address_space *mapping, struct iomap_dio_ops { int (*end_io)(struct kiocb *iocb, ssize_t size, int error, unsigned flags); - blk_qc_t (*submit_io)(struct inode *inode, struct iomap *iomap, - struct bio *bio, loff_t file_offset); + blk_qc_t (*submit_io)(const struct iomap_iter *iter, struct bio *bio, + loff_t file_offset); }; /* diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 32d448050bf7..d2f3435e7d17 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -40,6 +40,7 @@ struct iommu_domain; struct notifier_block; struct iommu_sva; struct iommu_fault_event; +struct iommu_dma_cookie; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 @@ -60,6 +61,7 @@ struct iommu_domain_geometry { #define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API implementation */ #define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ +#define __IOMMU_DOMAIN_DMA_FQ (1U << 3) /* DMA-API uses flush queue */ /* * This are the possible domain-types @@ -72,12 +74,17 @@ struct iommu_domain_geometry { * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations. * This flag allows IOMMU drivers to implement * certain optimizations for these domains + * IOMMU_DOMAIN_DMA_FQ - As above, but definitely using batched TLB + * invalidation. 
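The struct iomap_iter documentation in the iomap.h hunk above implies a simple calling convention for the interface that replaces iomap_apply(): loop while iomap_iter() returns a positive value and report per-iteration progress through ->processed. A hedged sketch; example_actor() merely stands in for a caller's real per-mapping work and is not a kernel function:

#include <linux/iomap.h>

/* Hypothetical per-iteration work: pretend the whole mapping was handled. */
static s64 example_actor(const struct iomap_iter *iter)
{
        return iomap_length(iter);
}

static int example_iomap_op(struct inode *inode, loff_t pos, u64 len,
                            const struct iomap_ops *ops)
{
        struct iomap_iter iter = {
                .inode  = inode,
                .pos    = pos,
                .len    = len,
        };
        int ret;

        while ((ret = iomap_iter(&iter, ops)) > 0)
                iter.processed = example_actor(&iter);

        return ret;
}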
*/ #define IOMMU_DOMAIN_BLOCKED (0U) #define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) #define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) #define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ __IOMMU_DOMAIN_DMA_API) +#define IOMMU_DOMAIN_DMA_FQ (__IOMMU_DOMAIN_PAGING | \ + __IOMMU_DOMAIN_DMA_API | \ + __IOMMU_DOMAIN_DMA_FQ) struct iommu_domain { unsigned type; @@ -86,9 +93,14 @@ struct iommu_domain { iommu_fault_handler_t handler; void *handler_token; struct iommu_domain_geometry geometry; - void *iova_cookie; + struct iommu_dma_cookie *iova_cookie; }; +static inline bool iommu_is_dma_domain(struct iommu_domain *domain) +{ + return domain->type & __IOMMU_DOMAIN_DMA_API; +} + enum iommu_cap { IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA transactions */ @@ -160,16 +172,22 @@ enum iommu_dev_features { * @start: IOVA representing the start of the range to be flushed * @end: IOVA representing the end of the range to be flushed (inclusive) * @pgsize: The interval at which to perform the flush + * @freelist: Removed pages to free after sync + * @queued: Indicates that the flush will be queued * * This structure is intended to be updated by multiple calls to the * ->unmap() function in struct iommu_ops before eventually being passed - * into ->iotlb_sync(). + * into ->iotlb_sync(). Drivers can add pages to @freelist to be freed after + * ->iotlb_sync() or ->iotlb_flush_all() have cleared all cached references to + * them. @queued is set to indicate when ->iotlb_flush_all() will be called + * later instead of ->iotlb_sync(), so drivers may optimise accordingly. */ struct iommu_iotlb_gather { unsigned long start; unsigned long end; size_t pgsize; struct page *freelist; + bool queued; }; /** @@ -180,7 +198,10 @@ struct iommu_iotlb_gather { * @attach_dev: attach device to an iommu domain * @detach_dev: detach device from an iommu domain * @map: map a physically contiguous memory region to an iommu domain + * @map_pages: map a physically contiguous set of pages of the same size to + * an iommu domain. 
* @unmap: unmap a physically contiguous memory region from an iommu domain + * @unmap_pages: unmap a number of pages of the same size from an iommu domain * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain * @iotlb_sync_map: Sync mappings created recently using @map to the hardware * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush @@ -229,8 +250,14 @@ struct iommu_ops { void (*detach_dev)(struct iommu_domain *domain, struct device *dev); int (*map)(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp); + int (*map_pages)(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t pgsize, size_t pgcount, + int prot, gfp_t gfp, size_t *mapped); size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); + size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova, + size_t pgsize, size_t pgcount, + struct iommu_iotlb_gather *iotlb_gather); void (*flush_iotlb_all)(struct iommu_domain *domain); void (*iotlb_sync_map)(struct iommu_domain *domain, unsigned long iova, size_t size); @@ -414,11 +441,11 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, extern size_t iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather); -extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg,unsigned int nents, int prot); -extern size_t iommu_map_sg_atomic(struct iommu_domain *domain, - unsigned long iova, struct scatterlist *sg, - unsigned int nents, int prot); +extern ssize_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot); +extern ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot); extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); extern void iommu_set_fault_handler(struct iommu_domain *domain, iommu_fault_handler_t handler, void *token); @@ -476,8 +503,7 @@ int iommu_enable_nesting(struct iommu_domain *domain); int iommu_set_pgtable_quirks(struct iommu_domain *domain, unsigned long quirks); -void iommu_set_dma_strict(bool val); -bool iommu_get_dma_strict(struct iommu_domain *domain); +void iommu_set_dma_strict(void); extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags); @@ -497,29 +523,80 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain, iommu_iotlb_gather_init(iotlb_gather); } +/** + * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint + * + * @gather: TLB gather data + * @iova: start of page to invalidate + * @size: size of page to invalidate + * + * Helper for IOMMU drivers to check whether a new range and the gathered range + * are disjoint. For many IOMMUs, flushing the IOMMU in this case is better + * than merging the two, which might lead to unnecessary invalidations. 
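Taken together, the gather helpers split out in this iommu.h hunk let a driver pick the batching strategy that suits its hardware instead of relying on the generic per-page logic. A hedged sketch of an ->unmap() callback for hardware that only cares about the covered address range; my_hw_unmap() is an illustrative stand-in for the driver's page-table code:

#include <linux/iommu.h>

/* Hypothetical page-table walk; returns the number of bytes actually unmapped. */
static size_t my_hw_unmap(struct iommu_domain *domain, unsigned long iova,
                          size_t size)
{
        return size;
}

static size_t example_unmap(struct iommu_domain *domain, unsigned long iova,
                            size_t size, struct iommu_iotlb_gather *gather)
{
        size_t unmapped = my_hw_unmap(domain, iova, size);

        /* Batch by address range only; page-size granularity is irrelevant here. */
        if (unmapped)
                iommu_iotlb_gather_add_range(gather, iova, unmapped);

        return unmapped;
}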
+ */ +static inline +bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t size) +{ + unsigned long start = iova, end = start + size - 1; + + return gather->end != 0 && + (end + 1 < gather->start || start > gather->end + 1); +} + + +/** + * iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation + * @gather: TLB gather data + * @iova: start of page to invalidate + * @size: size of page to invalidate + * + * Helper for IOMMU drivers to build arbitrarily-sized invalidation commands + * where only the address range matters, and simply minimising intermediate + * syncs is preferred. + */ +static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather, + unsigned long iova, size_t size) +{ + unsigned long end = iova + size - 1; + + if (gather->start > iova) + gather->start = iova; + if (gather->end < end) + gather->end = end; +} + +/** + * iommu_iotlb_gather_add_page - Gather for page-based TLB invalidation + * @domain: IOMMU domain to be invalidated + * @gather: TLB gather data + * @iova: start of page to invalidate + * @size: size of page to invalidate + * + * Helper for IOMMU drivers to build invalidation commands based on individual + * pages, or with page size/table level hints which cannot be gathered if they + * differ. + */ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, struct iommu_iotlb_gather *gather, unsigned long iova, size_t size) { - unsigned long start = iova, end = start + size - 1; - /* * If the new page is disjoint from the current range or is mapped at * a different granularity, then sync the TLB so that the gather * structure can be rewritten. */ - if (gather->pgsize != size || - end + 1 < gather->start || start > gather->end + 1) { - if (gather->pgsize) - iommu_iotlb_sync(domain, gather); - gather->pgsize = size; - } + if ((gather->pgsize && gather->pgsize != size) || + iommu_iotlb_gather_is_disjoint(gather, iova, size)) + iommu_iotlb_sync(domain, gather); - if (gather->end < end) - gather->end = end; + gather->pgsize = size; + iommu_iotlb_gather_add_range(gather, iova, size); +} - if (gather->start > start) - gather->start = start; +static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) +{ + return gather && gather->queued; } /* PCI device grouping function */ @@ -679,18 +756,18 @@ static inline size_t iommu_unmap_fast(struct iommu_domain *domain, return 0; } -static inline size_t iommu_map_sg(struct iommu_domain *domain, - unsigned long iova, struct scatterlist *sg, - unsigned int nents, int prot) +static inline ssize_t iommu_map_sg(struct iommu_domain *domain, + unsigned long iova, struct scatterlist *sg, + unsigned int nents, int prot) { - return 0; + return -ENODEV; } -static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain, +static inline ssize_t iommu_map_sg_atomic(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot) { - return 0; + return -ENODEV; } static inline void iommu_flush_iotlb_all(struct iommu_domain *domain) @@ -870,6 +947,11 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain, { } +static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather) +{ + return false; +} + static inline void iommu_device_unregister(struct iommu_device *iommu) { } diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index e9bfe6972aed..3f53bc27a19b 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -6,46 +6,22 @@ 
#include <linux/sched/rt.h> #include <linux/iocontext.h> -/* - * Gives us 8 prio classes with 13-bits of data for each class - */ -#define IOPRIO_CLASS_SHIFT (13) -#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) - -#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT) -#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK) -#define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | data) - -#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE) +#include <uapi/linux/ioprio.h> /* - * These are the io priority groups as implemented by CFQ. RT is the realtime - * class, it always gets premium service. BE is the best-effort scheduling - * class, the default for any process. IDLE is the idle scheduling class, it - * is only served when no one else is using the disk. + * Default IO priority. */ -enum { - IOPRIO_CLASS_NONE, - IOPRIO_CLASS_RT, - IOPRIO_CLASS_BE, - IOPRIO_CLASS_IDLE, -}; +#define IOPRIO_DEFAULT IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_BE_NORM) /* - * 8 best effort priority levels are supported + * Check that a priority value has a valid class. */ -#define IOPRIO_BE_NR (8) - -enum { - IOPRIO_WHO_PROCESS = 1, - IOPRIO_WHO_PGRP, - IOPRIO_WHO_USER, -}; +static inline bool ioprio_valid(unsigned short ioprio) +{ + unsigned short class = IOPRIO_PRIO_CLASS(ioprio); -/* - * Fallback BE priority - */ -#define IOPRIO_NORM (4) + return class > IOPRIO_CLASS_NONE && class <= IOPRIO_CLASS_IDLE; +} /* * if process has set io priority explicitly, use that. if not, convert @@ -80,7 +56,7 @@ static inline int get_current_ioprio(void) if (ioc) return ioc->ioprio; - return IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0); + return IOPRIO_DEFAULT; } /* diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 70b2ad3b9884..ef4a69865737 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -76,6 +76,9 @@ struct ipv6_devconf { __s32 disable_policy; __s32 ndisc_tclass; __s32 rpl_seg_enabled; + __u32 ioam6_id; + __u32 ioam6_id_wide; + __u8 ioam6_enabled; struct ctl_table_header *sysctl_header; }; diff --git a/include/linux/irq.h b/include/linux/irq.h index 8e9a9ae471a6..c8293c817646 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -569,6 +569,7 @@ struct irq_chip { * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs * in the suspend path if they are in disabled state + * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup */ enum { IRQCHIP_SET_TYPE_MASKED = (1 << 0), @@ -581,6 +582,7 @@ enum { IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), IRQCHIP_SUPPORTS_NMI = (1 << 8), IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), + IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), }; #include <linux/irqdesc.h> diff --git a/include/linux/iscsi_ibft.h b/include/linux/iscsi_ibft.h index b7b45ca82bea..790e7fcfc1a6 100644 --- a/include/linux/iscsi_ibft.h +++ b/include/linux/iscsi_ibft.h @@ -13,26 +13,22 @@ #ifndef ISCSI_IBFT_H #define ISCSI_IBFT_H -#include <linux/acpi.h> +#include <linux/types.h> /* - * Logical location of iSCSI Boot Format Table. - * If the value is NULL there is no iBFT on the machine. + * Physical location of iSCSI Boot Format Table. + * If the value is 0 there is no iBFT on the machine. */ -extern struct acpi_table_ibft *ibft_addr; +extern phys_addr_t ibft_phys_addr; /* * Routine used to find and reserve the iSCSI Boot Format Table. The - * mapped address is set in the ibft_addr variable. 
+ * physical address is set in the ibft_phys_addr variable. */ #ifdef CONFIG_ISCSI_IBFT_FIND -unsigned long find_ibft_region(unsigned long *sizep); +void reserve_ibft_region(void); #else -static inline unsigned long find_ibft_region(unsigned long *sizep) -{ - *sizep = 0; - return 0; -} +static inline void reserve_ibft_region(void) {} #endif #endif /* ISCSI_IBFT_H */ diff --git a/include/linux/kasan-tags.h b/include/linux/kasan-tags.h new file mode 100644 index 000000000000..4f85f562512c --- /dev/null +++ b/include/linux/kasan-tags.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_KASAN_TAGS_H +#define _LINUX_KASAN_TAGS_H + +#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */ +#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */ +#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */ + +#ifdef CONFIG_KASAN_HW_TAGS +#define KASAN_TAG_MIN 0xF0 /* minimum value for random tags */ +#else +#define KASAN_TAG_MIN 0x00 /* minimum value for random tags */ +#endif + +#endif /* LINUX_KASAN_TAGS_H */ diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 0125a677b67f..ea0f5e580fac 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -13,6 +13,8 @@ * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> */ +#include <linux/list.h> + /* Shifted versions of the command enable bits are be used if the command * has no arguments (see kdb_check_flags). This allows commands, such as * go, to have different permissions depending upon whether it is called @@ -64,6 +66,17 @@ typedef enum { typedef int (*kdb_func_t)(int, const char **); +/* The KDB shell command table */ +typedef struct _kdbtab { + char *name; /* Command name */ + kdb_func_t func; /* Function to execute command */ + char *usage; /* Usage String for this command */ + char *help; /* Help message for this command */ + short minlen; /* Minimum legal # cmd chars required */ + kdb_cmdflags_t flags; /* Command behaviour flags */ + struct list_head list_node; /* Command list */ +} kdbtab_t; + #ifdef CONFIG_KGDB_KDB #include <linux/init.h> #include <linux/sched.h> @@ -193,19 +206,13 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos) #endif /* ! CONFIG_KALLSYMS */ /* Dynamic kdb shell command registration */ -extern int kdb_register(char *, kdb_func_t, char *, char *, short); -extern int kdb_register_flags(char *, kdb_func_t, char *, char *, - short, kdb_cmdflags_t); -extern int kdb_unregister(char *); +extern int kdb_register(kdbtab_t *cmd); +extern void kdb_unregister(kdbtab_t *cmd); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } static inline void kdb_init(int level) {} -static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen) { return 0; } -static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen, - kdb_cmdflags_t flags) { return 0; } -static inline int kdb_unregister(char *cmd) { return 0; } +static inline int kdb_register(kdbtab_t *cmd) { return 0; } +static inline void kdb_unregister(kdbtab_t *cmd) {} #endif /* CONFIG_KGDB_KDB */ enum { KDB_NOT_INITIALIZED, diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 1b2f0a7e00d6..2776423a587e 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -2,7 +2,7 @@ #ifndef _LINUX_KERNEL_H #define _LINUX_KERNEL_H -#include <stdarg.h> +#include <linux/stdarg.h> #include <linux/align.h> #include <linux/limits.h> #include <linux/linkage.h> diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 9e8ca8743c26..1093abf7c28c 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h @@ -98,6 +98,11 @@ struct kernfs_elem_dir { * better directly in kernfs_node but is here to save space. */ struct kernfs_root *root; + /* + * Monotonic revision counter, used to identify if a directory + * node has changed during negative dentry revalidation. + */ + unsigned long rev; }; struct kernfs_elem_symlink { @@ -188,7 +193,7 @@ struct kernfs_root { u32 id_highbits; struct kernfs_syscall_ops *syscall_ops; - /* list of kernfs_super_info of this root, protected by kernfs_mutex */ + /* list of kernfs_super_info of this root, protected by kernfs_rwsem */ struct list_head supers; wait_queue_head_t deactivate_waitq; diff --git a/include/linux/kfence.h b/include/linux/kfence.h index a70d1ea03532..3fe6dd8a18c1 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -51,10 +51,11 @@ extern atomic_t kfence_allocation_gate; static __always_inline bool is_kfence_address(const void *addr) { /* - * The non-NULL check is required in case the __kfence_pool pointer was - * never initialized; keep it in the slow-path after the range-check. + * The __kfence_pool != NULL check is required to deal with the case + * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. Keep it in + * the slow-path after the range-check! 
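To make that failure mode concrete: while __kfence_pool is still NULL, the subtraction degenerates to the raw pointer value, so an address such as (void *)0x1000 yields (unsigned long)((char *)addr - __kfence_pool) == 0x1000, comfortably below KFENCE_POOL_SIZE; only the explicit __kfence_pool check in the updated return statement rejects it.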
*/ - return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && addr); + return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); } /** diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index ae7735b490b4..041ca7f15ea4 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -150,6 +150,7 @@ static inline bool is_error_page(struct page *page) #define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_UNBLOCK 2 #define KVM_REQ_UNHALT 3 +#define KVM_REQ_VM_BUGGED (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQUEST_ARCH_BASE 8 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ @@ -158,6 +159,15 @@ static inline bool is_error_page(struct page *page) }) #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0) +bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, + struct kvm_vcpu *except, + unsigned long *vcpu_bitmap, cpumask_var_t tmp); +bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); +bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, + struct kvm_vcpu *except); +bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, + unsigned long *vcpu_bitmap); + #define KVM_USERSPACE_IRQ_SOURCE_ID 0 #define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1 @@ -344,6 +354,13 @@ struct kvm_vcpu { struct kvm_vcpu_stat stat; char stats_id[KVM_STATS_NAME_SIZE]; struct kvm_dirty_ring dirty_ring; + + /* + * The index of the most recently used memslot by this vCPU. It's ok + * if this becomes stale due to memslot changes since we always check + * it is a valid slot. + */ + int last_used_slot; }; /* must be called with irqs disabled */ @@ -512,7 +529,7 @@ struct kvm_memslots { u64 generation; /* The mapping table from slot id to the index in memslots[]. */ short id_to_index[KVM_MEM_SLOTS_NUM]; - atomic_t lru_slot; + atomic_t last_used_slot; int used_slots; struct kvm_memory_slot memslots[]; }; @@ -538,6 +555,11 @@ struct kvm { struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM]; struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + /* Used to wait for completion of MMU notifiers. */ + spinlock_t mn_invalidate_lock; + unsigned long mn_active_invalidate_count; + struct rcuwait mn_memslots_update_rcuwait; + /* * created_vcpus is protected by kvm->lock, and is incremented * at the beginning of KVM_CREATE_VCPU. online_vcpus is only @@ -596,6 +618,7 @@ struct kvm { pid_t userspace_pid; unsigned int max_halt_poll_ns; u32 dirty_ring_size; + bool vm_bugged; #ifdef CONFIG_HAVE_KVM_PM_NOTIFIER struct notifier_block pm_notifier; @@ -629,6 +652,30 @@ struct kvm { #define vcpu_err(vcpu, fmt, ...) \ kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__) +static inline void kvm_vm_bugged(struct kvm *kvm) +{ + kvm->vm_bugged = true; + kvm_make_all_cpus_request(kvm, KVM_REQ_VM_BUGGED); +} + +#define KVM_BUG(cond, kvm, fmt...) 
\ +({ \ + int __ret = (cond); \ + \ + if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \ + kvm_vm_bugged(kvm); \ + unlikely(__ret); \ +}) + +#define KVM_BUG_ON(cond, kvm) \ +({ \ + int __ret = (cond); \ + \ + if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \ + kvm_vm_bugged(kvm); \ + unlikely(__ret); \ +}) + static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) { return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); @@ -720,6 +767,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, void kvm_exit(void); void kvm_get_kvm(struct kvm *kvm); +bool kvm_get_kvm_safe(struct kvm *kvm); void kvm_put_kvm(struct kvm *kvm); bool file_is_kvm(struct file *file); void kvm_put_kvm_no_destroy(struct kvm *kvm); @@ -824,7 +872,6 @@ void kvm_release_pfn_clean(kvm_pfn_t pfn); void kvm_release_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_dirty(kvm_pfn_t pfn); void kvm_set_pfn_accessed(kvm_pfn_t pfn); -void kvm_get_pfn(kvm_pfn_t pfn); void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache); int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, @@ -943,14 +990,10 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); #endif -bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except, - unsigned long *vcpu_bitmap, cpumask_var_t tmp); -bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req); -bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, - struct kvm_vcpu *except); -bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req, - unsigned long *vcpu_bitmap); +void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start, + unsigned long end); +void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start, + unsigned long end); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); @@ -1034,6 +1077,7 @@ bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu); bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_post_init_vm(struct kvm *kvm); void kvm_arch_pre_destroy_vm(struct kvm *kvm); +int kvm_arch_create_vm_debugfs(struct kvm *kvm); #ifndef __KVM_HAVE_ARCH_VM_ALLOC /* @@ -1157,29 +1201,49 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id); bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args); /* - * search_memslots() and __gfn_to_memslot() are here because they are - * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c. - * gfn_to_memslot() itself isn't here as an inline because that would - * bloat other code too much. + * Returns a pointer to the memslot at slot_index if it contains gfn. + * Otherwise returns NULL. + */ +static inline struct kvm_memory_slot * +try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) +{ + struct kvm_memory_slot *slot; + + if (slot_index < 0 || slot_index >= slots->used_slots) + return NULL; + + /* + * slot_index can come from vcpu->last_used_slot which is not kept + * in sync with userspace-controllable memslot deletion. So use nospec + * to prevent the CPU from speculating past the end of memslots[]. 
+ */ + slot_index = array_index_nospec(slot_index, slots->used_slots); + slot = &slots->memslots[slot_index]; + + if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) + return slot; + else + return NULL; +} + +/* + * Returns a pointer to the memslot that contains gfn and records the index of + * the slot in index. Otherwise returns NULL. * * IMPORTANT: Slots are sorted from highest GFN to lowest GFN! */ static inline struct kvm_memory_slot * -search_memslots(struct kvm_memslots *slots, gfn_t gfn) +search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index) { int start = 0, end = slots->used_slots; - int slot = atomic_read(&slots->lru_slot); struct kvm_memory_slot *memslots = slots->memslots; + struct kvm_memory_slot *slot; if (unlikely(!slots->used_slots)) return NULL; - if (gfn >= memslots[slot].base_gfn && - gfn < memslots[slot].base_gfn + memslots[slot].npages) - return &memslots[slot]; - while (start < end) { - slot = start + (end - start) / 2; + int slot = start + (end - start) / 2; if (gfn >= memslots[slot].base_gfn) end = slot; @@ -1187,19 +1251,37 @@ search_memslots(struct kvm_memslots *slots, gfn_t gfn) start = slot + 1; } - if (start < slots->used_slots && gfn >= memslots[start].base_gfn && - gfn < memslots[start].base_gfn + memslots[start].npages) { - atomic_set(&slots->lru_slot, start); - return &memslots[start]; + slot = try_get_memslot(slots, start, gfn); + if (slot) { + *index = start; + return slot; } return NULL; } +/* + * __gfn_to_memslot() and its descendants are here because it is called from + * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot() + * itself isn't here as an inline because that would bloat other code too much. + */ static inline struct kvm_memory_slot * __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) { - return search_memslots(slots, gfn); + struct kvm_memory_slot *slot; + int slot_index = atomic_read(&slots->last_used_slot); + + slot = try_get_memslot(slots, slot_index, gfn); + if (slot) + return slot; + + slot = search_memslots(slots, gfn, &slot_index); + if (slot) { + atomic_set(&slots->last_used_slot, slot_index); + return slot; + } + + return NULL; } static inline unsigned long @@ -1273,56 +1355,66 @@ struct _kvm_stats_desc { char name[KVM_STATS_NAME_SIZE]; }; -#define STATS_DESC_COMMON(type, unit, base, exp) \ +#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \ .flags = type | unit | base | \ BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \ BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \ BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \ .exponent = exp, \ - .size = 1 + .size = sz, \ + .bucket_size = bsz -#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp) \ +#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ { \ { \ - STATS_DESC_COMMON(type, unit, base, exp), \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ .offset = offsetof(struct kvm_vm_stat, generic.stat) \ }, \ .name = #stat, \ } -#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp) \ +#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ { \ { \ - STATS_DESC_COMMON(type, unit, base, exp), \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \ }, \ .name = #stat, \ } -#define VM_STATS_DESC(stat, type, unit, base, exp) \ +#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ { \ { \ - STATS_DESC_COMMON(type, unit, base, exp), \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ .offset = 
offsetof(struct kvm_vm_stat, stat) \ }, \ .name = #stat, \ } -#define VCPU_STATS_DESC(stat, type, unit, base, exp) \ +#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \ { \ { \ - STATS_DESC_COMMON(type, unit, base, exp), \ + STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \ .offset = offsetof(struct kvm_vcpu_stat, stat) \ }, \ .name = #stat, \ } /* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */ -#define STATS_DESC(SCOPE, stat, type, unit, base, exp) \ - SCOPE##_STATS_DESC(stat, type, unit, base, exp) +#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \ + SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz) #define STATS_DESC_CUMULATIVE(SCOPE, name, unit, base, exponent) \ - STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, unit, base, exponent) + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_CUMULATIVE, \ + unit, base, exponent, 1, 0) #define STATS_DESC_INSTANT(SCOPE, name, unit, base, exponent) \ - STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, unit, base, exponent) + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_INSTANT, \ + unit, base, exponent, 1, 0) #define STATS_DESC_PEAK(SCOPE, name, unit, base, exponent) \ - STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, unit, base, exponent) + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_PEAK, \ + unit, base, exponent, 1, 0) +#define STATS_DESC_LINEAR_HIST(SCOPE, name, unit, base, exponent, sz, bsz) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LINEAR_HIST, \ + unit, base, exponent, sz, bsz) +#define STATS_DESC_LOG_HIST(SCOPE, name, unit, base, exponent, sz) \ + STATS_DESC(SCOPE, name, KVM_STATS_TYPE_LOG_HIST, \ + unit, base, exponent, sz, 0) /* Cumulative counter, read/write */ #define STATS_DESC_COUNTER(SCOPE, name) \ @@ -1341,9 +1433,18 @@ struct _kvm_stats_desc { #define STATS_DESC_TIME_NSEC(SCOPE, name) \ STATS_DESC_CUMULATIVE(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ KVM_STATS_BASE_POW10, -9) +/* Linear histogram for time in nanosecond */ +#define STATS_DESC_LINHIST_TIME_NSEC(SCOPE, name, sz, bsz) \ + STATS_DESC_LINEAR_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ + KVM_STATS_BASE_POW10, -9, sz, bsz) +/* Logarithmic histogram for time in nanosecond */ +#define STATS_DESC_LOGHIST_TIME_NSEC(SCOPE, name, sz) \ + STATS_DESC_LOG_HIST(SCOPE, name, KVM_STATS_UNIT_SECONDS, \ + KVM_STATS_BASE_POW10, -9, sz) #define KVM_GENERIC_VM_STATS() \ - STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush) + STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush), \ + STATS_DESC_COUNTER(VM_GENERIC, remote_tlb_flush_requests) #define KVM_GENERIC_VCPU_STATS() \ STATS_DESC_COUNTER(VCPU_GENERIC, halt_successful_poll), \ @@ -1351,13 +1452,62 @@ struct _kvm_stats_desc { STATS_DESC_COUNTER(VCPU_GENERIC, halt_poll_invalid), \ STATS_DESC_COUNTER(VCPU_GENERIC, halt_wakeup), \ STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_success_ns), \ - STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns) + STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_ns), \ + STATS_DESC_TIME_NSEC(VCPU_GENERIC, halt_wait_ns), \ + STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_success_hist, \ + HALT_POLL_HIST_COUNT), \ + STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \ + HALT_POLL_HIST_COUNT), \ + STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \ + HALT_POLL_HIST_COUNT) extern struct dentry *kvm_debugfs_dir; + ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, const struct _kvm_stats_desc *desc, void *stats, size_t size_stats, char __user *user_buffer, size_t size, loff_t *offset); + +/** + * kvm_stats_linear_hist_update() - Update bucket value for 
linear histogram + * statistics data. + * + * @data: start address of the stats data + * @size: the number of bucket of the stats data + * @value: the new value used to update the linear histogram's bucket + * @bucket_size: the size (width) of a bucket + */ +static inline void kvm_stats_linear_hist_update(u64 *data, size_t size, + u64 value, size_t bucket_size) +{ + size_t index = div64_u64(value, bucket_size); + + index = min(index, size - 1); + ++data[index]; +} + +/** + * kvm_stats_log_hist_update() - Update bucket value for logarithmic histogram + * statistics data. + * + * @data: start address of the stats data + * @size: the number of bucket of the stats data + * @value: the new value used to update the logarithmic histogram's bucket + */ +static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value) +{ + size_t index = fls64(value); + + index = min(index, size - 1); + ++data[index]; +} + +#define KVM_STATS_LINEAR_HIST_UPDATE(array, value, bsize) \ + kvm_stats_linear_hist_update(array, ARRAY_SIZE(array), value, bsize) +#define KVM_STATS_LOG_HIST_UPDATE(array, value) \ + kvm_stats_log_hist_update(array, ARRAY_SIZE(array), value) + + extern const struct kvm_stats_header kvm_vm_stats_header; extern const struct _kvm_stats_desc kvm_vm_stats_desc[]; extern const struct kvm_stats_header kvm_vcpu_stats_header; diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index ed6a985c5680..2237abb93ccd 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -76,8 +76,11 @@ struct kvm_mmu_memory_cache { }; #endif +#define HALT_POLL_HIST_COUNT 32 + struct kvm_vm_stat_generic { u64 remote_tlb_flush; + u64 remote_tlb_flush_requests; }; struct kvm_vcpu_stat_generic { @@ -87,6 +90,10 @@ struct kvm_vcpu_stat_generic { u64 halt_wakeup; u64 halt_poll_success_ns; u64 halt_poll_fail_ns; + u64 halt_wait_ns; + u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT]; + u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT]; + u64 halt_wait_hist[HALT_POLL_HIST_COUNT]; }; #define KVM_STATS_NAME_SIZE 48 diff --git a/include/linux/leds.h b/include/linux/leds.h index 329fd914cf24..a0b730be40ad 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -33,6 +33,12 @@ enum led_brightness { LED_FULL = 255, }; +enum led_default_state { + LEDS_DEFSTATE_OFF = 0, + LEDS_DEFSTATE_ON = 1, + LEDS_DEFSTATE_KEEP = 2, +}; + struct led_init_data { /* device fwnode handle */ struct fwnode_handle *fwnode; @@ -520,9 +526,9 @@ struct gpio_led { /* default_state should be one of LEDS_GPIO_DEFSTATE_(ON|OFF|KEEP) */ struct gpio_desc *gpiod; }; -#define LEDS_GPIO_DEFSTATE_OFF 0 -#define LEDS_GPIO_DEFSTATE_ON 1 -#define LEDS_GPIO_DEFSTATE_KEEP 2 +#define LEDS_GPIO_DEFSTATE_OFF LEDS_DEFSTATE_OFF +#define LEDS_GPIO_DEFSTATE_ON LEDS_DEFSTATE_ON +#define LEDS_GPIO_DEFSTATE_KEEP LEDS_DEFSTATE_KEEP struct gpio_led_platform_data { int num_leds; diff --git a/include/linux/libata.h b/include/linux/libata.h index 3fcd24236793..c0c64f03e107 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -161,6 +161,10 @@ enum { ATA_DFLAG_D_SENSE = (1 << 29), /* Descriptor sense requested */ ATA_DFLAG_ZAC = (1 << 30), /* ZAC device */ + ATA_DFLAG_FEATURES_MASK = ATA_DFLAG_TRUSTED | ATA_DFLAG_DA | \ + ATA_DFLAG_DEVSLP | ATA_DFLAG_NCQ_SEND_RECV | \ + ATA_DFLAG_NCQ_PRIO, + ATA_DEV_UNKNOWN = 0, /* unknown device */ ATA_DEV_ATA = 1, /* ATA device */ ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ @@ -422,6 +426,7 @@ enum { ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ ATA_HORKAGE_MAX_SEC_1024 = (1 << 
25), /* Limit max sects to 1024 */ ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ + ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ @@ -535,6 +540,7 @@ typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes) extern struct device_attribute dev_attr_unload_heads; #ifdef CONFIG_SATA_HOST extern struct device_attribute dev_attr_link_power_management_policy; +extern struct device_attribute dev_attr_ncq_prio_supported; extern struct device_attribute dev_attr_ncq_prio_enable; extern struct device_attribute dev_attr_em_message_type; extern struct device_attribute dev_attr_em_message; @@ -1454,7 +1460,7 @@ static inline bool sata_pmp_attached(struct ata_port *ap) static inline bool ata_is_host_link(const struct ata_link *link) { - return 1; + return true; } #endif /* CONFIG_SATA_PMP */ diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h deleted file mode 100644 index 0908abda9c1b..000000000000 --- a/include/linux/lightnvm.h +++ /dev/null @@ -1,697 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef NVM_H -#define NVM_H - -#include <linux/blkdev.h> -#include <linux/types.h> -#include <uapi/linux/lightnvm.h> - -enum { - NVM_IO_OK = 0, - NVM_IO_REQUEUE = 1, - NVM_IO_DONE = 2, - NVM_IO_ERR = 3, - - NVM_IOTYPE_NONE = 0, - NVM_IOTYPE_GC = 1, -}; - -/* common format */ -#define NVM_GEN_CH_BITS (8) -#define NVM_GEN_LUN_BITS (8) -#define NVM_GEN_BLK_BITS (16) -#define NVM_GEN_RESERVED (32) - -/* 1.2 format */ -#define NVM_12_PG_BITS (16) -#define NVM_12_PL_BITS (4) -#define NVM_12_SEC_BITS (4) -#define NVM_12_RESERVED (8) - -/* 2.0 format */ -#define NVM_20_SEC_BITS (24) -#define NVM_20_RESERVED (8) - -enum { - NVM_OCSSD_SPEC_12 = 12, - NVM_OCSSD_SPEC_20 = 20, -}; - -struct ppa_addr { - /* Generic structure for all addresses */ - union { - /* generic device format */ - struct { - u64 ch : NVM_GEN_CH_BITS; - u64 lun : NVM_GEN_LUN_BITS; - u64 blk : NVM_GEN_BLK_BITS; - u64 reserved : NVM_GEN_RESERVED; - } a; - - /* 1.2 device format */ - struct { - u64 ch : NVM_GEN_CH_BITS; - u64 lun : NVM_GEN_LUN_BITS; - u64 blk : NVM_GEN_BLK_BITS; - u64 pg : NVM_12_PG_BITS; - u64 pl : NVM_12_PL_BITS; - u64 sec : NVM_12_SEC_BITS; - u64 reserved : NVM_12_RESERVED; - } g; - - /* 2.0 device format */ - struct { - u64 grp : NVM_GEN_CH_BITS; - u64 pu : NVM_GEN_LUN_BITS; - u64 chk : NVM_GEN_BLK_BITS; - u64 sec : NVM_20_SEC_BITS; - u64 reserved : NVM_20_RESERVED; - } m; - - struct { - u64 line : 63; - u64 is_cached : 1; - } c; - - u64 ppa; - }; -}; - -struct nvm_rq; -struct nvm_id; -struct nvm_dev; -struct nvm_tgt_dev; -struct nvm_chk_meta; - -typedef int (nvm_id_fn)(struct nvm_dev *); -typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); -typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); -typedef int (nvm_get_chk_meta_fn)(struct nvm_dev *, sector_t, int, - struct nvm_chk_meta *); -typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *, void *); -typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *, int); -typedef void (nvm_destroy_dma_pool_fn)(void *); -typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t, - dma_addr_t *); -typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); - -struct nvm_dev_ops { - nvm_id_fn *identity; - nvm_op_bb_tbl_fn *get_bb_tbl; - nvm_op_set_bb_fn *set_bb_tbl; - - nvm_get_chk_meta_fn *get_chk_meta; - - nvm_submit_io_fn *submit_io; - - 
nvm_create_dma_pool_fn *create_dma_pool; - nvm_destroy_dma_pool_fn *destroy_dma_pool; - nvm_dev_dma_alloc_fn *dev_dma_alloc; - nvm_dev_dma_free_fn *dev_dma_free; -}; - -#ifdef CONFIG_NVM - -#include <linux/file.h> -#include <linux/dmapool.h> - -enum { - /* HW Responsibilities */ - NVM_RSP_L2P = 1 << 0, - NVM_RSP_ECC = 1 << 1, - - /* Physical Adressing Mode */ - NVM_ADDRMODE_LINEAR = 0, - NVM_ADDRMODE_CHANNEL = 1, - - /* Plane programming mode for LUN */ - NVM_PLANE_SINGLE = 1, - NVM_PLANE_DOUBLE = 2, - NVM_PLANE_QUAD = 4, - - /* Status codes */ - NVM_RSP_SUCCESS = 0x0, - NVM_RSP_NOT_CHANGEABLE = 0x1, - NVM_RSP_ERR_FAILWRITE = 0x40ff, - NVM_RSP_ERR_EMPTYPAGE = 0x42ff, - NVM_RSP_ERR_FAILECC = 0x4281, - NVM_RSP_ERR_FAILCRC = 0x4004, - NVM_RSP_WARN_HIGHECC = 0x4700, - - /* Device opcodes */ - NVM_OP_PWRITE = 0x91, - NVM_OP_PREAD = 0x92, - NVM_OP_ERASE = 0x90, - - /* PPA Command Flags */ - NVM_IO_SNGL_ACCESS = 0x0, - NVM_IO_DUAL_ACCESS = 0x1, - NVM_IO_QUAD_ACCESS = 0x2, - - /* NAND Access Modes */ - NVM_IO_SUSPEND = 0x80, - NVM_IO_SLC_MODE = 0x100, - NVM_IO_SCRAMBLE_ENABLE = 0x200, - - /* Block Types */ - NVM_BLK_T_FREE = 0x0, - NVM_BLK_T_BAD = 0x1, - NVM_BLK_T_GRWN_BAD = 0x2, - NVM_BLK_T_DEV = 0x4, - NVM_BLK_T_HOST = 0x8, - - /* Memory capabilities */ - NVM_ID_CAP_SLC = 0x1, - NVM_ID_CAP_CMD_SUSPEND = 0x2, - NVM_ID_CAP_SCRAMBLE = 0x4, - NVM_ID_CAP_ENCRYPT = 0x8, - - /* Memory types */ - NVM_ID_FMTYPE_SLC = 0, - NVM_ID_FMTYPE_MLC = 1, - - /* Device capabilities */ - NVM_ID_DCAP_BBLKMGMT = 0x1, - NVM_UD_DCAP_ECC = 0x2, -}; - -struct nvm_id_lp_mlc { - u16 num_pairs; - u8 pairs[886]; -}; - -struct nvm_id_lp_tbl { - __u8 id[8]; - struct nvm_id_lp_mlc mlc; -}; - -struct nvm_addrf_12 { - u8 ch_len; - u8 lun_len; - u8 blk_len; - u8 pg_len; - u8 pln_len; - u8 sec_len; - - u8 ch_offset; - u8 lun_offset; - u8 blk_offset; - u8 pg_offset; - u8 pln_offset; - u8 sec_offset; - - u64 ch_mask; - u64 lun_mask; - u64 blk_mask; - u64 pg_mask; - u64 pln_mask; - u64 sec_mask; -}; - -struct nvm_addrf { - u8 ch_len; - u8 lun_len; - u8 chk_len; - u8 sec_len; - u8 rsv_len[2]; - - u8 ch_offset; - u8 lun_offset; - u8 chk_offset; - u8 sec_offset; - u8 rsv_off[2]; - - u64 ch_mask; - u64 lun_mask; - u64 chk_mask; - u64 sec_mask; - u64 rsv_mask[2]; -}; - -enum { - /* Chunk states */ - NVM_CHK_ST_FREE = 1 << 0, - NVM_CHK_ST_CLOSED = 1 << 1, - NVM_CHK_ST_OPEN = 1 << 2, - NVM_CHK_ST_OFFLINE = 1 << 3, - - /* Chunk types */ - NVM_CHK_TP_W_SEQ = 1 << 0, - NVM_CHK_TP_W_RAN = 1 << 1, - NVM_CHK_TP_SZ_SPEC = 1 << 4, -}; - -/* - * Note: The structure size is linked to nvme_nvm_chk_meta such that the same - * buffer can be used when converting from little endian to cpu addressing. 
- */ -struct nvm_chk_meta { - u8 state; - u8 type; - u8 wi; - u8 rsvd[5]; - u64 slba; - u64 cnlb; - u64 wp; -}; - -struct nvm_target { - struct list_head list; - struct nvm_tgt_dev *dev; - struct nvm_tgt_type *type; - struct gendisk *disk; -}; - -#define ADDR_EMPTY (~0ULL) - -#define NVM_TARGET_DEFAULT_OP (101) -#define NVM_TARGET_MIN_OP (3) -#define NVM_TARGET_MAX_OP (80) - -#define NVM_VERSION_MAJOR 1 -#define NVM_VERSION_MINOR 0 -#define NVM_VERSION_PATCH 0 - -#define NVM_MAX_VLBA (64) /* max logical blocks in a vector command */ - -struct nvm_rq; -typedef void (nvm_end_io_fn)(struct nvm_rq *); - -struct nvm_rq { - struct nvm_tgt_dev *dev; - - struct bio *bio; - - union { - struct ppa_addr ppa_addr; - dma_addr_t dma_ppa_list; - }; - - struct ppa_addr *ppa_list; - - void *meta_list; - dma_addr_t dma_meta_list; - - nvm_end_io_fn *end_io; - - uint8_t opcode; - uint16_t nr_ppas; - uint16_t flags; - - u64 ppa_status; /* ppa media status */ - int error; - - int is_seq; /* Sequential hint flag. 1.2 only */ - - void *private; -}; - -static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu) -{ - return pdu - sizeof(struct nvm_rq); -} - -static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata) -{ - return rqdata + 1; -} - -static inline struct ppa_addr *nvm_rq_to_ppa_list(struct nvm_rq *rqd) -{ - return (rqd->nr_ppas > 1) ? rqd->ppa_list : &rqd->ppa_addr; -} - -enum { - NVM_BLK_ST_FREE = 0x1, /* Free block */ - NVM_BLK_ST_TGT = 0x2, /* Block in use by target */ - NVM_BLK_ST_BAD = 0x8, /* Bad block */ -}; - -/* Instance geometry */ -struct nvm_geo { - /* device reported version */ - u8 major_ver_id; - u8 minor_ver_id; - - /* kernel short version */ - u8 version; - - /* instance specific geometry */ - int num_ch; - int num_lun; /* per channel */ - - /* calculated values */ - int all_luns; /* across channels */ - int all_chunks; /* across channels */ - - int op; /* over-provision in instance */ - - sector_t total_secs; /* across channels */ - - /* chunk geometry */ - u32 num_chk; /* chunks per lun */ - u32 clba; /* sectors per chunk */ - u16 csecs; /* sector size */ - u16 sos; /* out-of-band area size */ - bool ext; /* metadata in extended data buffer */ - u32 mdts; /* Max data transfer size*/ - - /* device write constrains */ - u32 ws_min; /* minimum write size */ - u32 ws_opt; /* optimal write size */ - u32 mw_cunits; /* distance required for successful read */ - u32 maxoc; /* maximum open chunks */ - u32 maxocpu; /* maximum open chunks per parallel unit */ - - /* device capabilities */ - u32 mccap; - - /* device timings */ - u32 trdt; /* Avg. Tread (ns) */ - u32 trdm; /* Max Tread (ns) */ - u32 tprt; /* Avg. Tprog (ns) */ - u32 tprm; /* Max Tprog (ns) */ - u32 tbet; /* Avg. 
Terase (ns) */ - u32 tbem; /* Max Terase (ns) */ - - /* generic address format */ - struct nvm_addrf addrf; - - /* 1.2 compatibility */ - u8 vmnt; - u32 cap; - u32 dom; - - u8 mtype; - u8 fmtype; - - u16 cpar; - u32 mpos; - - u8 num_pln; - u8 pln_mode; - u16 num_pg; - u16 fpg_sz; -}; - -/* sub-device structure */ -struct nvm_tgt_dev { - /* Device information */ - struct nvm_geo geo; - - /* Base ppas for target LUNs */ - struct ppa_addr *luns; - - struct request_queue *q; - - struct nvm_dev *parent; - void *map; -}; - -struct nvm_dev { - struct nvm_dev_ops *ops; - - struct list_head devices; - - /* Device information */ - struct nvm_geo geo; - - unsigned long *lun_map; - void *dma_pool; - - /* Backend device */ - struct request_queue *q; - char name[DISK_NAME_LEN]; - void *private_data; - - struct kref ref; - void *rmap; - - struct mutex mlock; - spinlock_t lock; - - /* target management */ - struct list_head area_list; - struct list_head targets; -}; - -static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev, - struct ppa_addr r) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr l; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; - - l.ppa = ((u64)r.g.ch) << ppaf->ch_offset; - l.ppa |= ((u64)r.g.lun) << ppaf->lun_offset; - l.ppa |= ((u64)r.g.blk) << ppaf->blk_offset; - l.ppa |= ((u64)r.g.pg) << ppaf->pg_offset; - l.ppa |= ((u64)r.g.pl) << ppaf->pln_offset; - l.ppa |= ((u64)r.g.sec) << ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = &geo->addrf; - - l.ppa = ((u64)r.m.grp) << lbaf->ch_offset; - l.ppa |= ((u64)r.m.pu) << lbaf->lun_offset; - l.ppa |= ((u64)r.m.chk) << lbaf->chk_offset; - l.ppa |= ((u64)r.m.sec) << lbaf->sec_offset; - } - - return l; -} - -static inline struct ppa_addr dev_to_generic_addr(struct nvm_dev *dev, - struct ppa_addr r) -{ - struct nvm_geo *geo = &dev->geo; - struct ppa_addr l; - - l.ppa = 0; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)&geo->addrf; - - l.g.ch = (r.ppa & ppaf->ch_mask) >> ppaf->ch_offset; - l.g.lun = (r.ppa & ppaf->lun_mask) >> ppaf->lun_offset; - l.g.blk = (r.ppa & ppaf->blk_mask) >> ppaf->blk_offset; - l.g.pg = (r.ppa & ppaf->pg_mask) >> ppaf->pg_offset; - l.g.pl = (r.ppa & ppaf->pln_mask) >> ppaf->pln_offset; - l.g.sec = (r.ppa & ppaf->sec_mask) >> ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = &geo->addrf; - - l.m.grp = (r.ppa & lbaf->ch_mask) >> lbaf->ch_offset; - l.m.pu = (r.ppa & lbaf->lun_mask) >> lbaf->lun_offset; - l.m.chk = (r.ppa & lbaf->chk_mask) >> lbaf->chk_offset; - l.m.sec = (r.ppa & lbaf->sec_mask) >> lbaf->sec_offset; - } - - return l; -} - -static inline u64 dev_to_chunk_addr(struct nvm_dev *dev, void *addrf, - struct ppa_addr p) -{ - struct nvm_geo *geo = &dev->geo; - u64 caddr; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = (struct nvm_addrf_12 *)addrf; - - caddr = (u64)p.g.pg << ppaf->pg_offset; - caddr |= (u64)p.g.pl << ppaf->pln_offset; - caddr |= (u64)p.g.sec << ppaf->sec_offset; - } else { - caddr = p.m.sec; - } - - return caddr; -} - -static inline struct ppa_addr nvm_ppa32_to_ppa64(struct nvm_dev *dev, - void *addrf, u32 ppa32) -{ - struct ppa_addr ppa64; - - ppa64.ppa = 0; - - if (ppa32 == -1) { - ppa64.ppa = ADDR_EMPTY; - } else if (ppa32 & (1U << 31)) { - ppa64.c.line = ppa32 & ((~0U) >> 1); - ppa64.c.is_cached = 1; - } else { - struct nvm_geo *geo = &dev->geo; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = addrf; - 
- ppa64.g.ch = (ppa32 & ppaf->ch_mask) >> - ppaf->ch_offset; - ppa64.g.lun = (ppa32 & ppaf->lun_mask) >> - ppaf->lun_offset; - ppa64.g.blk = (ppa32 & ppaf->blk_mask) >> - ppaf->blk_offset; - ppa64.g.pg = (ppa32 & ppaf->pg_mask) >> - ppaf->pg_offset; - ppa64.g.pl = (ppa32 & ppaf->pln_mask) >> - ppaf->pln_offset; - ppa64.g.sec = (ppa32 & ppaf->sec_mask) >> - ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = addrf; - - ppa64.m.grp = (ppa32 & lbaf->ch_mask) >> - lbaf->ch_offset; - ppa64.m.pu = (ppa32 & lbaf->lun_mask) >> - lbaf->lun_offset; - ppa64.m.chk = (ppa32 & lbaf->chk_mask) >> - lbaf->chk_offset; - ppa64.m.sec = (ppa32 & lbaf->sec_mask) >> - lbaf->sec_offset; - } - } - - return ppa64; -} - -static inline u32 nvm_ppa64_to_ppa32(struct nvm_dev *dev, - void *addrf, struct ppa_addr ppa64) -{ - u32 ppa32 = 0; - - if (ppa64.ppa == ADDR_EMPTY) { - ppa32 = ~0U; - } else if (ppa64.c.is_cached) { - ppa32 |= ppa64.c.line; - ppa32 |= 1U << 31; - } else { - struct nvm_geo *geo = &dev->geo; - - if (geo->version == NVM_OCSSD_SPEC_12) { - struct nvm_addrf_12 *ppaf = addrf; - - ppa32 |= ppa64.g.ch << ppaf->ch_offset; - ppa32 |= ppa64.g.lun << ppaf->lun_offset; - ppa32 |= ppa64.g.blk << ppaf->blk_offset; - ppa32 |= ppa64.g.pg << ppaf->pg_offset; - ppa32 |= ppa64.g.pl << ppaf->pln_offset; - ppa32 |= ppa64.g.sec << ppaf->sec_offset; - } else { - struct nvm_addrf *lbaf = addrf; - - ppa32 |= ppa64.m.grp << lbaf->ch_offset; - ppa32 |= ppa64.m.pu << lbaf->lun_offset; - ppa32 |= ppa64.m.chk << lbaf->chk_offset; - ppa32 |= ppa64.m.sec << lbaf->sec_offset; - } - } - - return ppa32; -} - -static inline int nvm_next_ppa_in_chk(struct nvm_tgt_dev *dev, - struct ppa_addr *ppa) -{ - struct nvm_geo *geo = &dev->geo; - int last = 0; - - if (geo->version == NVM_OCSSD_SPEC_12) { - int sec = ppa->g.sec; - - sec++; - if (sec == geo->ws_min) { - int pg = ppa->g.pg; - - sec = 0; - pg++; - if (pg == geo->num_pg) { - int pl = ppa->g.pl; - - pg = 0; - pl++; - if (pl == geo->num_pln) - last = 1; - - ppa->g.pl = pl; - } - ppa->g.pg = pg; - } - ppa->g.sec = sec; - } else { - ppa->m.sec++; - if (ppa->m.sec == geo->clba) - last = 1; - } - - return last; -} - -typedef sector_t (nvm_tgt_capacity_fn)(void *); -typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, - int flags); -typedef void (nvm_tgt_exit_fn)(void *, bool); -typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *); -typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *); - -enum { - NVM_TGT_F_DEV_L2P = 0, - NVM_TGT_F_HOST_L2P = 1 << 0, -}; - -struct nvm_tgt_type { - const char *name; - unsigned int version[3]; - int flags; - - /* target entry points */ - const struct block_device_operations *bops; - nvm_tgt_capacity_fn *capacity; - - /* module-specific init/teardown */ - nvm_tgt_init_fn *init; - nvm_tgt_exit_fn *exit; - - /* sysfs */ - nvm_tgt_sysfs_init_fn *sysfs_init; - nvm_tgt_sysfs_exit_fn *sysfs_exit; - - /* For internal use */ - struct list_head list; - struct module *owner; -}; - -extern int nvm_register_tgt_type(struct nvm_tgt_type *); -extern void nvm_unregister_tgt_type(struct nvm_tgt_type *); - -extern void *nvm_dev_dma_alloc(struct nvm_dev *, gfp_t, dma_addr_t *); -extern void nvm_dev_dma_free(struct nvm_dev *, void *, dma_addr_t); - -extern struct nvm_dev *nvm_alloc_dev(int); -extern int nvm_register(struct nvm_dev *); -extern void nvm_unregister(struct nvm_dev *); - -extern int nvm_get_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr, - int, struct nvm_chk_meta *); -extern int nvm_set_chunk_meta(struct nvm_tgt_dev *, struct ppa_addr *, 
- int, int); -extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *, void *); -extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *, void *); -extern void nvm_end_io(struct nvm_rq *); - -#else /* CONFIG_NVM */ -struct nvm_dev_ops; - -static inline struct nvm_dev *nvm_alloc_dev(int node) -{ - return ERR_PTR(-EINVAL); -} -static inline int nvm_register(struct nvm_dev *dev) -{ - return -EINVAL; -} -static inline void nvm_unregister(struct nvm_dev *dev) {} -#endif /* CONFIG_NVM */ -#endif /* LIGHTNVM.H */ diff --git a/include/linux/linear_range.h b/include/linux/linear_range.h index 17b5943727d5..fd3d0b358f22 100644 --- a/include/linux/linear_range.h +++ b/include/linux/linear_range.h @@ -41,6 +41,8 @@ int linear_range_get_selector_low(const struct linear_range *r, int linear_range_get_selector_high(const struct linear_range *r, unsigned int val, unsigned int *selector, bool *found); +void linear_range_get_selector_within(const struct linear_range *r, + unsigned int val, unsigned int *selector); int linear_range_get_selector_low_array(const struct linear_range *r, int ranges, unsigned int val, unsigned int *selector, bool *found); diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index ded90b097e6e..975e33b793a7 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -6,6 +6,8 @@ #include <linux/percpu-defs.h> #include <linux/lockdep.h> +#ifndef CONFIG_PREEMPT_RT + typedef struct { #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; @@ -14,29 +16,14 @@ typedef struct { } local_lock_t; #ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LL_DEP_MAP_INIT(lockname) \ +# define LOCAL_LOCK_DEBUG_INIT(lockname) \ .dep_map = { \ .name = #lockname, \ .wait_type_inner = LD_WAIT_CONFIG, \ - .lock_type = LD_LOCK_PERCPU, \ - } -#else -# define LL_DEP_MAP_INIT(lockname) -#endif - -#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) } - -#define __local_lock_init(lock) \ -do { \ - static struct lock_class_key __key; \ - \ - debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ - lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \ - LD_WAIT_CONFIG, LD_WAIT_INV, \ - LD_LOCK_PERCPU); \ -} while (0) + .lock_type = LD_LOCK_PERCPU, \ + }, \ + .owner = NULL, -#ifdef CONFIG_DEBUG_LOCK_ALLOC static inline void local_lock_acquire(local_lock_t *l) { lock_map_acquire(&l->dep_map); @@ -51,11 +38,30 @@ static inline void local_lock_release(local_lock_t *l) lock_map_release(&l->dep_map); } +static inline void local_lock_debug_init(local_lock_t *l) +{ + l->owner = NULL; +} #else /* CONFIG_DEBUG_LOCK_ALLOC */ +# define LOCAL_LOCK_DEBUG_INIT(lockname) static inline void local_lock_acquire(local_lock_t *l) { } static inline void local_lock_release(local_lock_t *l) { } +static inline void local_lock_debug_init(local_lock_t *l) { } #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ +#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) } + +#define __local_lock_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ + lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \ + 0, LD_WAIT_CONFIG, LD_WAIT_INV, \ + LD_LOCK_PERCPU); \ + local_lock_debug_init(lock); \ +} while (0) + #define __local_lock(lock) \ do { \ preempt_disable(); \ @@ -91,3 +97,45 @@ static inline void local_lock_release(local_lock_t *l) { } local_lock_release(this_cpu_ptr(lock)); \ local_irq_restore(flags); \ } while (0) + +#else /* !CONFIG_PREEMPT_RT */ + +/* + * On 
PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the + * critical section while staying preemptible. + */ +typedef spinlock_t local_lock_t; + +#define INIT_LOCAL_LOCK(lockname) __LOCAL_SPIN_LOCK_UNLOCKED((lockname)) + +#define __local_lock_init(l) \ + do { \ + local_spin_lock_init((l)); \ + } while (0) + +#define __local_lock(__lock) \ + do { \ + migrate_disable(); \ + spin_lock(this_cpu_ptr((__lock))); \ + } while (0) + +#define __local_lock_irq(lock) __local_lock(lock) + +#define __local_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __local_lock(lock); \ + } while (0) + +#define __local_unlock(__lock) \ + do { \ + spin_unlock(this_cpu_ptr((__lock))); \ + migrate_enable(); \ + } while (0) + +#define __local_unlock_irq(lock) __local_unlock(lock) + +#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock) + +#endif /* CONFIG_PREEMPT_RT */ diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h index 0520c0cd73f4..3bc9f7410e21 100644 --- a/include/linux/lockd/bind.h +++ b/include/linux/lockd/bind.h @@ -27,7 +27,8 @@ struct rpc_task; struct nlmsvc_binding { __be32 (*fopen)(struct svc_rqst *, struct nfs_fh *, - struct file **); + struct file **, + int mode); void (*fclose)(struct file *); }; diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 666f5f310a04..c4ae6506b8b3 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -10,6 +10,8 @@ #ifndef LINUX_LOCKD_LOCKD_H #define LINUX_LOCKD_LOCKD_H +/* XXX: a lot of this should really be under fs/lockd. */ + #include <linux/in.h> #include <linux/in6.h> #include <net/ipv6.h> @@ -154,7 +156,8 @@ struct nlm_rqst { struct nlm_file { struct hlist_node f_list; /* linked list */ struct nfs_fh f_handle; /* NFS file handle */ - struct file * f_file; /* VFS file pointer */ + struct file * f_file[2]; /* VFS file pointers, + indexed by O_ flags */ struct nlm_share * f_shares; /* DOS shares */ struct list_head f_blocks; /* blocked locks */ unsigned int f_locks; /* guesstimate # of locks */ @@ -267,6 +270,7 @@ typedef int (*nlm_host_match_fn_t)(void *cur, struct nlm_host *ref); /* * Server-side lock handling */ +int lock_to_openmode(struct file_lock *); __be32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *, struct nlm_host *, struct nlm_lock *, int, struct nlm_cookie *, int); @@ -286,7 +290,7 @@ void nlmsvc_locks_init_private(struct file_lock *, struct nlm_host *, pid_t); * File handling for the server personality */ __be32 nlm_lookup_file(struct svc_rqst *, struct nlm_file **, - struct nfs_fh *); + struct nlm_lock *); void nlm_release_file(struct nlm_file *); void nlmsvc_release_lockowner(struct nlm_lock *); void nlmsvc_mark_resources(struct net *); @@ -301,7 +305,8 @@ int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr); static inline struct inode *nlmsvc_file_inode(struct nlm_file *file) { - return locks_inode(file->f_file); + return locks_inode(file->f_file[O_RDONLY] ? 
+ file->f_file[O_RDONLY] : file->f_file[O_WRONLY]); } static inline int __nlm_privileged_request4(const struct sockaddr *sap) diff --git a/include/linux/mISDNif.h b/include/linux/mISDNif.h index a7330eb3ec64..7dd1f01ec4f9 100644 --- a/include/linux/mISDNif.h +++ b/include/linux/mISDNif.h @@ -18,7 +18,6 @@ #ifndef mISDNIF_H #define mISDNIF_H -#include <stdarg.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/socket.h> diff --git a/include/linux/mdev.h b/include/linux/mdev.h index 3a38598c2605..68427e8fadeb 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h @@ -72,11 +72,6 @@ struct device *mtype_get_parent_dev(struct mdev_type *mtype); * @mdev: mdev_device device structure which is being * destroyed * Returns integer: success (0) or error (< 0) - * @open: Open mediated device. - * @mdev: mediated device. - * Returns integer: success (0) or error (< 0) - * @release: release mediated device - * @mdev: mediated device. * @read: Read emulation callback * @mdev: mediated device structure * @buf: read buffer @@ -111,8 +106,8 @@ struct mdev_parent_ops { int (*create)(struct mdev_device *mdev); int (*remove)(struct mdev_device *mdev); - int (*open)(struct mdev_device *mdev); - void (*release)(struct mdev_device *mdev); + int (*open_device)(struct mdev_device *mdev); + void (*close_device)(struct mdev_device *mdev); ssize_t (*read)(struct mdev_device *mdev, char __user *buf, size_t count, loff_t *ppos); ssize_t (*write)(struct mdev_device *mdev, const char __user *buf, diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 07f5ef8fc456..c6786c12b207 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -91,12 +91,13 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv); mei_cldev_driver_register,\ mei_cldev_driver_unregister) -ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); +ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, + size_t length); ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf, size_t length); -ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, - u8 vtag); +ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf, + size_t length, u8 vtag); ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length, u8 *vtag); ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf, @@ -114,6 +115,6 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data); int mei_cldev_enable(struct mei_cl_device *cldev); int mei_cldev_disable(struct mei_cl_device *cldev); -bool mei_cldev_enabled(struct mei_cl_device *cldev); +bool mei_cldev_enabled(const struct mei_cl_device *cldev); #endif /* _LINUX_MEI_CL_BUS_H */ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 4a53c3ca86bd..b066024c62e3 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -99,8 +99,6 @@ void memblock_discard(void); static inline void memblock_discard(void) {} #endif -phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, - phys_addr_t size, phys_addr_t align); void memblock_allow_resize(void); int memblock_add_node(phys_addr_t base, phys_addr_t size, int nid); int memblock_add(phys_addr_t base, phys_addr_t size); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index bfe5c486f4ad..3096c9a0ee01 100644 --- a/include/linux/memcontrol.h +++ 
b/include/linux/memcontrol.h @@ -105,14 +105,6 @@ struct mem_cgroup_reclaim_iter { unsigned int generation; }; -struct lruvec_stat { - long count[NR_VM_NODE_STAT_ITEMS]; -}; - -struct batched_lruvec_stat { - s32 count[NR_VM_NODE_STAT_ITEMS]; -}; - /* * Bitmap and deferred work of shrinker::id corresponding to memcg-aware * shrinkers, which have elements charged to this memcg. @@ -123,24 +115,30 @@ struct shrinker_info { unsigned long *map; }; +struct lruvec_stats_percpu { + /* Local (CPU and cgroup) state */ + long state[NR_VM_NODE_STAT_ITEMS]; + + /* Delta calculation for lockless upward propagation */ + long state_prev[NR_VM_NODE_STAT_ITEMS]; +}; + +struct lruvec_stats { + /* Aggregated (CPU and subtree) state */ + long state[NR_VM_NODE_STAT_ITEMS]; + + /* Pending child counts during tree propagation */ + long state_pending[NR_VM_NODE_STAT_ITEMS]; +}; + /* * per-node information in memory controller. */ struct mem_cgroup_per_node { struct lruvec lruvec; - /* - * Legacy local VM stats. This should be struct lruvec_stat and - * cannot be optimized to struct batched_lruvec_stat. Because - * the threshold of the lruvec_stat_cpu can be as big as - * MEMCG_CHARGE_BATCH * PAGE_SIZE. It can fit into s32. But this - * filed has no upper limit. - */ - struct lruvec_stat __percpu *lruvec_stat_local; - - /* Subtree VM stats (batched updates) */ - struct batched_lruvec_stat __percpu *lruvec_stat_cpu; - atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; + struct lruvec_stats_percpu __percpu *lruvec_stats_percpu; + struct lruvec_stats lruvec_stats; unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; @@ -595,13 +593,6 @@ static inline struct obj_cgroup **page_objcgs_check(struct page *page) } #endif -static __always_inline bool memcg_stat_item_in_bytes(int idx) -{ - if (idx == MEMCG_PERCPU_B) - return true; - return vmstat_item_in_bytes(idx); -} - static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg) { return (memcg == root_mem_cgroup); @@ -612,12 +603,15 @@ static inline bool mem_cgroup_disabled(void) return !cgroup_subsys_enabled(memory_cgrp_subsys); } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { + *min = *low = 0; + if (mem_cgroup_disabled()) - return 0; + return; /* * There is no reclaim protection applied to a targeted reclaim. 
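For illustration, a minimal sketch of the lockless delta propagation that the new state/state_prev pair in struct lruvec_stats_percpu above is designed for; the helper name and the exact flush path are assumptions, not the actual memcontrol.c code.

/* Hypothetical flush step: fold one CPU's deltas into the aggregated
 * lruvec_stats. Only the difference since the previous fold (state_prev)
 * is propagated, so no per-CPU locking is needed. */
static void lruvec_stats_fold_cpu(struct mem_cgroup_per_node *pn, int cpu)
{
	struct lruvec_stats_percpu *lstatc;
	int i;

	lstatc = per_cpu_ptr(pn->lruvec_stats_percpu, cpu);
	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
		long delta = lstatc->state[i] - lstatc->state_prev[i];

		if (delta) {
			pn->lruvec_stats.state[i] += delta;
			lstatc->state_prev[i] = lstatc->state[i];
		}
	}
}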
@@ -653,13 +647,10 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, * */ if (root == memcg) - return 0; - - if (in_low_reclaim) - return READ_ONCE(memcg->memory.emin); + return; - return max(READ_ONCE(memcg->memory.emin), - READ_ONCE(memcg->memory.elow)); + *min = READ_ONCE(memcg->memory.emin); + *low = READ_ONCE(memcg->memory.elow); } void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -693,13 +684,35 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *memcg) page_counter_read(&memcg->memory); } -int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask); +int __mem_cgroup_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask); +static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm, + gfp_t gfp_mask) +{ + if (mem_cgroup_disabled()) + return 0; + return __mem_cgroup_charge(page, mm, gfp_mask); +} + int mem_cgroup_swapin_charge_page(struct page *page, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); -void mem_cgroup_uncharge(struct page *page); -void mem_cgroup_uncharge_list(struct list_head *page_list); +void __mem_cgroup_uncharge(struct page *page); +static inline void mem_cgroup_uncharge(struct page *page) +{ + if (mem_cgroup_disabled()) + return; + __mem_cgroup_uncharge(page); +} + +void __mem_cgroup_uncharge_list(struct list_head *page_list); +static inline void mem_cgroup_uncharge_list(struct list_head *page_list) +{ + if (mem_cgroup_disabled()) + return; + __mem_cgroup_uncharge_list(page_list); +} void mem_cgroup_migrate(struct page *oldpage, struct page *newpage); @@ -884,11 +897,6 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg) return !!(memcg->css.flags & CSS_ONLINE); } -/* - * For memory reclaim. 
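A usage sketch for the new out-parameter form of mem_cgroup_protection() above; the reclaim-side caller below is illustrative, not the actual vmscan code.

/* Illustrative caller: the old API returned a single threshold chosen by
 * in_low_reclaim; now both limits are returned and the caller decides. */
static bool memcg_usage_protected(struct mem_cgroup *root,
				  struct mem_cgroup *memcg,
				  unsigned long usage, bool in_low_reclaim)
{
	unsigned long min, low;

	mem_cgroup_protection(root, memcg, &min, &low);

	if (in_low_reclaim)
		return usage <= min;

	return usage <= max(min, low);
}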
- */ -int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); - void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, int zid, int nr_pages); @@ -955,22 +963,21 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg, local_irq_restore(flags); } +static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) +{ + return READ_ONCE(memcg->vmstats.state[idx]); +} + static inline unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) { struct mem_cgroup_per_node *pn; - long x; if (mem_cgroup_disabled()) return node_page_state(lruvec_pgdat(lruvec), idx); pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); - x = atomic_long_read(&pn->lruvec_stat[idx]); -#ifdef CONFIG_SMP - if (x < 0) - x = 0; -#endif - return x; + return READ_ONCE(pn->lruvec_stats.state[idx]); } static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, @@ -985,7 +992,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); for_each_possible_cpu(cpu) - x += per_cpu(pn->lruvec_stat_local->count[idx], cpu); + x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu); #ifdef CONFIG_SMP if (x < 0) x = 0; @@ -993,6 +1000,8 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return x; } +void mem_cgroup_flush_stats(void); + void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val); void __mod_lruvec_kmem_state(void *p, enum node_stat_item idx, int val); @@ -1147,11 +1156,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, { } -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *root, - struct mem_cgroup *memcg, - bool in_low_reclaim) +static inline void mem_cgroup_protection(struct mem_cgroup *root, + struct mem_cgroup *memcg, + unsigned long *min, + unsigned long *low) { - return 0; + *min = *low = 0; } static inline void mem_cgroup_calculate_protection(struct mem_cgroup *root, @@ -1390,6 +1400,11 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg, { } +static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx) +{ + return 0; +} + static inline unsigned long lruvec_page_state(struct lruvec *lruvec, enum node_stat_item idx) { @@ -1402,6 +1417,10 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, return node_page_state(lruvec_pgdat(lruvec), idx); } +static inline void mem_cgroup_flush_stats(void) +{ +} + static inline void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, int val) { @@ -1581,7 +1600,8 @@ static inline void mem_cgroup_flush_foreign(struct bdi_writeback *wb) #endif /* CONFIG_CGROUP_WRITEBACK */ struct sock; -bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); +bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages, + gfp_t gfp_mask); void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages); #ifdef CONFIG_MEMCG extern struct static_key_false memcg_sockets_enabled_key; diff --git a/include/linux/memory.h b/include/linux/memory.h index 97e92e8b556a..7efc0a7c14c9 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -23,6 +23,48 @@ #define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) +/** + * struct memory_group - a logical group of memory blocks + * @nid: The node id for all memory blocks inside the memory group. + * @blocks: List of all memory blocks belonging to this memory group. 
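A short sketch of the read-side pairing introduced in the memcontrol.h hunk above: mem_cgroup_flush_stats() propagates pending per-CPU deltas before memcg_page_state() reads the aggregated counter; the surrounding helper is hypothetical.

/* Hypothetical stats reader: flush first so the aggregated counters are
 * reasonably current, then read them through the new accessor. */
static unsigned long memcg_report_file_pages(struct mem_cgroup *memcg)
{
	mem_cgroup_flush_stats();
	return memcg_page_state(memcg, NR_FILE_PAGES);
}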
+ * @present_kernel_pages: Present (online) memory outside ZONE_MOVABLE of this + * memory group. + * @present_movable_pages: Present (online) memory in ZONE_MOVABLE of this + * memory group. + * @is_dynamic: The memory group type: static vs. dynamic + * @s.max_pages: Valid with &memory_group.is_dynamic == false. The maximum + * number of pages we'll have in this static memory group. + * @d.unit_pages: Valid with &memory_group.is_dynamic == true. Unit in pages + * in which memory is added/removed in this dynamic memory group. + * This granularity defines the alignment of a unit in physical + * address space; it has to be at least as big as a single + * memory block. + * + * A memory group logically groups memory blocks; each memory block + * belongs to at most one memory group. A memory group corresponds to + * a memory device, such as a DIMM or a NUMA node, which spans multiple + * memory blocks and might even span multiple non-contiguous physical memory + * ranges. + * + * Modification of members after registration is serialized by memory + * hot(un)plug code. + */ +struct memory_group { + int nid; + struct list_head memory_blocks; + unsigned long present_kernel_pages; + unsigned long present_movable_pages; + bool is_dynamic; + union { + struct { + unsigned long max_pages; + } s; + struct { + unsigned long unit_pages; + } d; + }; +}; + struct memory_block { unsigned long start_section_nr; unsigned long state; /* serialized by the dev->lock */ @@ -34,6 +76,8 @@ struct memory_block { * lay at the beginning of the memory block. */ unsigned long nr_vmemmap_pages; + struct memory_group *group; /* group (if any) for this block */ + struct list_head group_next; /* next block inside memory group */ }; int arch_get_memory_phys_device(unsigned long start_pfn); @@ -86,16 +130,25 @@ static inline int memory_notify(unsigned long val, void *v) extern int register_memory_notifier(struct notifier_block *nb); extern void unregister_memory_notifier(struct notifier_block *nb); int create_memory_block_devices(unsigned long start, unsigned long size, - unsigned long vmemmap_pages); + unsigned long vmemmap_pages, + struct memory_group *group); void remove_memory_block_devices(unsigned long start, unsigned long size); extern void memory_dev_init(void); extern int memory_notify(unsigned long val, void *v); -extern struct memory_block *find_memory_block(struct mem_section *); +extern struct memory_block *find_memory_block(unsigned long section_nr); typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *); extern int walk_memory_blocks(unsigned long start, unsigned long size, void *arg, walk_memory_blocks_func_t func); extern int for_each_memory_block(void *arg, walk_memory_blocks_func_t func); #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) + +extern int memory_group_register_static(int nid, unsigned long max_pages); +extern int memory_group_register_dynamic(int nid, unsigned long unit_pages); +extern int memory_group_unregister(int mgid); +struct memory_group *memory_group_find_by_id(int mgid); +typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *); +int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, + struct memory_group *excluded, void *arg); #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ #ifdef CONFIG_MEMORY_HOTPLUG diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index a7fd2c3ccb77..e5a867c950b2 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -12,6 +12,7 @@ struct zone; struct 
pglist_data; struct mem_section; struct memory_block; +struct memory_group; struct resource; struct vmem_altmap; @@ -50,6 +51,11 @@ typedef int __bitwise mhp_t; * Only selected architectures support it with SPARSE_VMEMMAP. */ #define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1)) +/* + * The nid field specifies a memory group id (mgid) instead. The memory group + * implies the node id (nid). + */ +#define MHP_NID_IS_MGID ((__force mhp_t)BIT(2)) /* * Extended parameters for memory hotplug: @@ -95,13 +101,15 @@ static inline void zone_seqlock_init(struct zone *zone) extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages); extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); -extern void adjust_present_page_count(struct zone *zone, long nr_pages); +extern void adjust_present_page_count(struct page *page, + struct memory_group *group, + long nr_pages); /* VM interface that may be used by firmware interface */ extern int mhp_init_memmap_on_memory(unsigned long pfn, unsigned long nr_pages, struct zone *zone); extern void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages); extern int online_pages(unsigned long pfn, unsigned long nr_pages, - struct zone *zone); + struct zone *zone, struct memory_group *group); extern struct zone *test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn); extern void __offline_isolated_pages(unsigned long start_pfn, @@ -130,8 +138,7 @@ static inline bool movable_node_is_enabled(void) return movable_node_enabled; } -extern void arch_remove_memory(int nid, u64 start, u64 size, - struct vmem_altmap *altmap); +extern void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap); extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages, struct vmem_altmap *altmap); @@ -292,25 +299,27 @@ static inline void pgdat_resize_init(struct pglist_data *pgdat) {} #ifdef CONFIG_MEMORY_HOTREMOVE extern void try_offline_node(int nid); -extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); -extern int remove_memory(int nid, u64 start, u64 size); -extern void __remove_memory(int nid, u64 start, u64 size); -extern int offline_and_remove_memory(int nid, u64 start, u64 size); +extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages, + struct memory_group *group); +extern int remove_memory(u64 start, u64 size); +extern void __remove_memory(u64 start, u64 size); +extern int offline_and_remove_memory(u64 start, u64 size); #else static inline void try_offline_node(int nid) {} -static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages) +static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages, + struct memory_group *group) { return -EINVAL; } -static inline int remove_memory(int nid, u64 start, u64 size) +static inline int remove_memory(u64 start, u64 size) { return -EBUSY; } -static inline void __remove_memory(int nid, u64 start, u64 size) {} +static inline void __remove_memory(u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ extern void set_zone_contiguous(struct zone *zone); @@ -339,7 +348,8 @@ extern void sparse_remove_section(struct mem_section *ms, unsigned long map_offset, struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); -extern struct zone *zone_for_pfn_range(int online_type, int nid, unsigned start_pfn, +extern struct zone *zone_for_pfn_range(int 
online_type, int nid, + struct memory_group *group, unsigned long start_pfn, unsigned long nr_pages); extern int arch_create_linear_mapping(int nid, u64 start, u64 size, struct mhp_params *params); diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 0aaf91b496e2..4091692bed8c 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -184,6 +184,14 @@ extern bool vma_migratable(struct vm_area_struct *vma); extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long); extern void mpol_put_task_policy(struct task_struct *); +extern bool numa_demotion_enabled; + +static inline bool mpol_is_preferred_many(struct mempolicy *pol) +{ + return (pol->mode == MPOL_PREFERRED_MANY); +} + + #else struct mempolicy {}; @@ -292,5 +300,13 @@ static inline nodemask_t *policy_nodemask_current(gfp_t gfp) { return NULL; } + +#define numa_demotion_enabled false + +static inline bool mpol_is_preferred_many(struct mempolicy *pol) +{ + return false; +} + #endif /* CONFIG_NUMA */ #endif diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h index e6ee2ec35de9..cbf9d7619493 100644 --- a/include/linux/mfd/dbx500-prcmu.h +++ b/include/linux/mfd/dbx500-prcmu.h @@ -186,10 +186,11 @@ enum ddr_pwrst { #define PRCMU_FW_PROJECT_U8500_C3 8 #define PRCMU_FW_PROJECT_U8500_C4 9 #define PRCMU_FW_PROJECT_U9500_MBL 10 -#define PRCMU_FW_PROJECT_U8500_MBL 11 /* Customer specific */ +#define PRCMU_FW_PROJECT_U8500_SSG1 11 /* Samsung specific */ #define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */ #define PRCMU_FW_PROJECT_U8520 13 #define PRCMU_FW_PROJECT_U8420 14 +#define PRCMU_FW_PROJECT_U8500_SSG2 15 /* Samsung specific */ #define PRCMU_FW_PROJECT_U8420_SYSCLK 17 #define PRCMU_FW_PROJECT_A9420 20 /* [32..63] 9540 and derivatives */ diff --git a/include/linux/mfd/hi6421-spmi-pmic.h b/include/linux/mfd/hi6421-spmi-pmic.h index 2660226138b8..e5b8dbf828b6 100644 --- a/include/linux/mfd/hi6421-spmi-pmic.h +++ b/include/linux/mfd/hi6421-spmi-pmic.h @@ -19,11 +19,6 @@ struct hi6421_spmi_pmic { struct resource *res; struct device *dev; void __iomem *regs; - spinlock_t lock; - struct irq_domain *domain; - int irq; - int gpio; - unsigned int *irqs; struct regmap *regmap; }; diff --git a/include/linux/mfd/idt82p33_reg.h b/include/linux/mfd/idt82p33_reg.h new file mode 100644 index 000000000000..129a6c078221 --- /dev/null +++ b/include/linux/mfd/idt82p33_reg.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Register Map - Based on AN888_SMUforIEEE_SynchEther_82P33xxx_RevH.pdf + * + * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company. 
+ */ +#ifndef HAVE_IDT82P33_REG +#define HAVE_IDT82P33_REG + +/* Register address */ +#define DPLL1_TOD_CNFG 0x134 +#define DPLL2_TOD_CNFG 0x1B4 + +#define DPLL1_TOD_STS 0x10B +#define DPLL2_TOD_STS 0x18B + +#define DPLL1_TOD_TRIGGER 0x115 +#define DPLL2_TOD_TRIGGER 0x195 + +#define DPLL1_OPERATING_MODE_CNFG 0x120 +#define DPLL2_OPERATING_MODE_CNFG 0x1A0 + +#define DPLL1_HOLDOVER_FREQ_CNFG 0x12C +#define DPLL2_HOLDOVER_FREQ_CNFG 0x1AC + +#define DPLL1_PHASE_OFFSET_CNFG 0x143 +#define DPLL2_PHASE_OFFSET_CNFG 0x1C3 + +#define DPLL1_SYNC_EDGE_CNFG 0x140 +#define DPLL2_SYNC_EDGE_CNFG 0x1C0 + +#define DPLL1_INPUT_MODE_CNFG 0x116 +#define DPLL2_INPUT_MODE_CNFG 0x196 + +#define DPLL1_OPERATING_STS 0x102 +#define DPLL2_OPERATING_STS 0x182 + +#define DPLL1_CURRENT_FREQ_STS 0x103 +#define DPLL2_CURRENT_FREQ_STS 0x183 + +#define REG_SOFT_RESET 0X381 + +#define OUT_MUX_CNFG(outn) REG_ADDR(0x6, (0xC * (outn))) + +/* Register bit definitions */ +#define SYNC_TOD BIT(1) +#define PH_OFFSET_EN BIT(7) +#define SQUELCH_ENABLE BIT(5) + +/* Bit definitions for the DPLL_MODE register */ +#define PLL_MODE_SHIFT (0) +#define PLL_MODE_MASK (0x1F) +#define COMBO_MODE_EN BIT(5) +#define COMBO_MODE_SHIFT (6) +#define COMBO_MODE_MASK (0x3) + +/* Bit definitions for DPLL_OPERATING_STS register */ +#define OPERATING_STS_MASK (0x7) +#define OPERATING_STS_SHIFT (0x0) + +/* Bit definitions for DPLL_TOD_TRIGGER register */ +#define READ_TRIGGER_MASK (0xF) +#define READ_TRIGGER_SHIFT (0x0) +#define WRITE_TRIGGER_MASK (0xF0) +#define WRITE_TRIGGER_SHIFT (0x4) + +/* Bit definitions for REG_SOFT_RESET register */ +#define SOFT_RESET_EN BIT(7) + +enum pll_mode { + PLL_MODE_MIN = 0, + PLL_MODE_AUTOMATIC = PLL_MODE_MIN, + PLL_MODE_FORCE_FREERUN = 1, + PLL_MODE_FORCE_HOLDOVER = 2, + PLL_MODE_FORCE_LOCKED = 4, + PLL_MODE_FORCE_PRE_LOCKED2 = 5, + PLL_MODE_FORCE_PRE_LOCKED = 6, + PLL_MODE_FORCE_LOST_PHASE = 7, + PLL_MODE_DCO = 10, + PLL_MODE_WPH = 18, + PLL_MODE_MAX = PLL_MODE_WPH, +}; + +enum hw_tod_trig_sel { + HW_TOD_TRIG_SEL_MIN = 0, + HW_TOD_TRIG_SEL_NO_WRITE = HW_TOD_TRIG_SEL_MIN, + HW_TOD_TRIG_SEL_NO_READ = HW_TOD_TRIG_SEL_MIN, + HW_TOD_TRIG_SEL_SYNC_SEL = 1, + HW_TOD_TRIG_SEL_IN12 = 2, + HW_TOD_TRIG_SEL_IN13 = 3, + HW_TOD_TRIG_SEL_IN14 = 4, + HW_TOD_TRIG_SEL_TOD_PPS = 5, + HW_TOD_TRIG_SEL_TIMER_INTERVAL = 6, + HW_TOD_TRIG_SEL_MSB_PHASE_OFFSET_CNFG = 7, + HW_TOD_TRIG_SEL_MSB_HOLDOVER_FREQ_CNFG = 8, + HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG = 9, + HW_TOD_RD_TRIG_SEL_LSB_TOD_STS = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG, + WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_MSB_TOD_CNFG, +}; + +/** @brief Enumerated type listing DPLL operational modes */ +enum dpll_state { + DPLL_STATE_FREERUN = 1, + DPLL_STATE_HOLDOVER = 2, + DPLL_STATE_LOCKED = 4, + DPLL_STATE_PRELOCKED2 = 5, + DPLL_STATE_PRELOCKED = 6, + DPLL_STATE_LOSTPHASE = 7, + DPLL_STATE_MAX +}; + +#endif diff --git a/include/linux/mfd/idt8a340_reg.h b/include/linux/mfd/idt8a340_reg.h new file mode 100644 index 000000000000..92d763230bdf --- /dev/null +++ b/include/linux/mfd/idt8a340_reg.h @@ -0,0 +1,729 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Based on 5.2.0, Family Programming Guide (Sept 30, 2020) + * + * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company. 
+ */ +#ifndef HAVE_IDT8A340_REG +#define HAVE_IDT8A340_REG + +#define PAGE_ADDR_BASE 0x0000 +#define PAGE_ADDR 0x00fc + +#define HW_REVISION 0x8180 +#define REV_ID 0x007a + +#define HW_DPLL_0 (0x8a00) +#define HW_DPLL_1 (0x8b00) +#define HW_DPLL_2 (0x8c00) +#define HW_DPLL_3 (0x8d00) +#define HW_DPLL_4 (0x8e00) +#define HW_DPLL_5 (0x8f00) +#define HW_DPLL_6 (0x9000) +#define HW_DPLL_7 (0x9100) + +#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080) +#define HW_DPLL_TOD_CTRL_1 (0x089) +#define HW_DPLL_TOD_CTRL_2 (0x08A) +#define HW_DPLL_TOD_OVR__0 (0x098) +#define HW_DPLL_TOD_OUT_0__0 (0x0B0) + +#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740) +#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741) +#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742) +#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743) +#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744) +#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745) +#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746) +#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747) +#define HW_Q8_CH_SYNC_CTRL_0 (0xa748) +#define HW_Q8_CH_SYNC_CTRL_1 (0xa749) +#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a) +#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b) +#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c) +#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d) +#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e) +#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f) + +#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14 +#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15 +#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16 +#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17 + +#define SYNCTRL1_MASTER_SYNC_RST BIT(7) +#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5) +#define SYNCTRL1_TOD_SYNC_TRIG BIT(4) +#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3) +#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2) +#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1) +#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0) + +#define HW_Q8_CTRL_SPARE (0xa7d4) +#define HW_Q11_CTRL_SPARE (0xa7ec) + +/** + * Select FOD5 as sync_trigger for Q8 divider. + * Transition from logic zero to one + * sets trigger to sync Q8 divider. + * + * Unused when FOD4 is driving Q8 divider (normal operation). + */ +#define Q9_TO_Q8_SYNC_TRIG BIT(1) + +/** + * Enable FOD5 as driver for clock and sync for Q8 divider. + * Enable fanout buffer for FOD5. + * + * Unused when FOD4 is driving Q8 divider (normal operation). + */ +#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2)) + +/** + * Select FOD6 as sync_trigger for Q11 divider. + * Transition from logic zero to one + * sets trigger to sync Q11 divider. + * + * Unused when FOD7 is driving Q11 divider (normal operation). + */ +#define Q10_TO_Q11_SYNC_TRIG BIT(1) + +/** + * Enable FOD6 as driver for clock and sync for Q11 divider. + * Enable fanout buffer for FOD6. + * + * Unused when FOD7 is driving Q11 divider (normal operation). 
+ */ +#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2)) + +#define RESET_CTRL 0xc000 +#define SM_RESET 0x0012 +#define SM_RESET_V520 0x0013 +#define SM_RESET_CMD 0x5A + +#define GENERAL_STATUS 0xc014 +#define BOOT_STATUS 0x0000 +#define HW_REV_ID 0x000A +#define BOND_ID 0x000B +#define HW_CSR_ID 0x000C +#define HW_IRQ_ID 0x000E +#define MAJ_REL 0x0010 +#define MIN_REL 0x0011 +#define HOTFIX_REL 0x0012 +#define PIPELINE_ID 0x0014 +#define BUILD_ID 0x0018 +#define JTAG_DEVICE_ID 0x001c +#define PRODUCT_ID 0x001e +#define OTP_SCSR_CONFIG_SELECT 0x0022 + +#define STATUS 0xc03c +#define DPLL0_STATUS 0x0018 +#define DPLL1_STATUS 0x0019 +#define DPLL2_STATUS 0x001a +#define DPLL3_STATUS 0x001b +#define DPLL4_STATUS 0x001c +#define DPLL5_STATUS 0x001d +#define DPLL6_STATUS 0x001e +#define DPLL7_STATUS 0x001f +#define DPLL_SYS_STATUS 0x0020 +#define DPLL_SYS_APLL_STATUS 0x0021 +#define DPLL0_FILTER_STATUS 0x0044 +#define DPLL1_FILTER_STATUS 0x004c +#define DPLL2_FILTER_STATUS 0x0054 +#define DPLL3_FILTER_STATUS 0x005c +#define DPLL4_FILTER_STATUS 0x0064 +#define DPLL5_FILTER_STATUS 0x006c +#define DPLL6_FILTER_STATUS 0x0074 +#define DPLL7_FILTER_STATUS 0x007c +#define DPLLSYS_FILTER_STATUS 0x0084 +#define USER_GPIO0_TO_7_STATUS 0x008a +#define USER_GPIO8_TO_15_STATUS 0x008b + +#define GPIO_USER_CONTROL 0xc160 +#define GPIO0_TO_7_OUT 0x0000 +#define GPIO8_TO_15_OUT 0x0001 +#define GPIO0_TO_7_OUT_V520 0x0002 +#define GPIO8_TO_15_OUT_V520 0x0003 + +#define STICKY_STATUS_CLEAR 0xc164 + +#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c + +#define ALERT_CFG 0xc188 + +#define SYS_DPLL_XO 0xc194 + +#define SYS_APLL 0xc19c + +#define INPUT_0 0xc1b0 +#define INPUT_1 0xc1c0 +#define INPUT_2 0xc1d0 +#define INPUT_3 0xc200 +#define INPUT_4 0xc210 +#define INPUT_5 0xc220 +#define INPUT_6 0xc230 +#define INPUT_7 0xc240 +#define INPUT_8 0xc250 +#define INPUT_9 0xc260 +#define INPUT_10 0xc280 +#define INPUT_11 0xc290 +#define INPUT_12 0xc2a0 +#define INPUT_13 0xc2b0 +#define INPUT_14 0xc2c0 +#define INPUT_15 0xc2d0 + +#define REF_MON_0 0xc2e0 +#define REF_MON_1 0xc2ec +#define REF_MON_2 0xc300 +#define REF_MON_3 0xc30c +#define REF_MON_4 0xc318 +#define REF_MON_5 0xc324 +#define REF_MON_6 0xc330 +#define REF_MON_7 0xc33c +#define REF_MON_8 0xc348 +#define REF_MON_9 0xc354 +#define REF_MON_10 0xc360 +#define REF_MON_11 0xc36c +#define REF_MON_12 0xc380 +#define REF_MON_13 0xc38c +#define REF_MON_14 0xc398 +#define REF_MON_15 0xc3a4 + +#define DPLL_0 0xc3b0 +#define DPLL_CTRL_REG_0 0x0002 +#define DPLL_CTRL_REG_1 0x0003 +#define DPLL_CTRL_REG_2 0x0004 +#define DPLL_TOD_SYNC_CFG 0x0031 +#define DPLL_COMBO_SLAVE_CFG_0 0x0032 +#define DPLL_COMBO_SLAVE_CFG_1 0x0033 +#define DPLL_SLAVE_REF_CFG 0x0034 +#define DPLL_REF_MODE 0x0035 +#define DPLL_PHASE_MEASUREMENT_CFG 0x0036 +#define DPLL_MODE 0x0037 +#define DPLL_MODE_V520 0x003B +#define DPLL_1 0xc400 +#define DPLL_2 0xc438 +#define DPLL_2_V520 0xc43c +#define DPLL_3 0xc480 +#define DPLL_4 0xc4b8 +#define DPLL_4_V520 0xc4bc +#define DPLL_5 0xc500 +#define DPLL_6 0xc538 +#define DPLL_6_V520 0xc53c +#define DPLL_7 0xc580 +#define SYS_DPLL 0xc5b8 +#define SYS_DPLL_V520 0xc5bc + +#define DPLL_CTRL_0 0xc600 +#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001 +#define DPLL_CTRL_DPLL_FOD_FREQ 0x001c +#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a +#define DPLL_CTRL_1 0xc63c +#define DPLL_CTRL_2 0xc680 +#define DPLL_CTRL_3 0xc6bc +#define DPLL_CTRL_4 0xc700 +#define DPLL_CTRL_5 0xc73c +#define DPLL_CTRL_6 0xc780 +#define DPLL_CTRL_7 0xc7bc +#define SYS_DPLL_CTRL 0xc800 + 
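The map above is organized as module base addresses (STATUS, DPLL_0, GPIO_USER_CONTROL, ...) plus per-register offsets. A minimal sketch of that addressing convention, assuming a flat regmap over the 16-bit address space (the regmap handle itself is an assumption):

/* Hypothetical read of the DPLL0 status byte at STATUS + DPLL0_STATUS;
 * assumes <linux/regmap.h>. */
static int idt8a340_read_dpll0_status(struct regmap *regmap, u8 *sts)
{
	unsigned int val;
	int err;

	err = regmap_read(regmap, STATUS + DPLL0_STATUS, &val);
	if (err)
		return err;

	*sts = val;
	return 0;
}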
+#define DPLL_PHASE_0 0xc818 +/* Signed 42-bit FFO in units of 2^(-53) */ +#define DPLL_WR_PHASE 0x0000 +#define DPLL_PHASE_1 0xc81c +#define DPLL_PHASE_2 0xc820 +#define DPLL_PHASE_3 0xc824 +#define DPLL_PHASE_4 0xc828 +#define DPLL_PHASE_5 0xc82c +#define DPLL_PHASE_6 0xc830 +#define DPLL_PHASE_7 0xc834 + +#define DPLL_FREQ_0 0xc838 +/* Signed 42-bit FFO in units of 2^(-53) */ +#define DPLL_WR_FREQ 0x0000 +#define DPLL_FREQ_1 0xc840 +#define DPLL_FREQ_2 0xc848 +#define DPLL_FREQ_3 0xc850 +#define DPLL_FREQ_4 0xc858 +#define DPLL_FREQ_5 0xc860 +#define DPLL_FREQ_6 0xc868 +#define DPLL_FREQ_7 0xc870 + +#define DPLL_PHASE_PULL_IN_0 0xc880 +#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */ +#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */ +#define PULL_IN_CTRL 0x0007 +#define DPLL_PHASE_PULL_IN_1 0xc888 +#define DPLL_PHASE_PULL_IN_2 0xc890 +#define DPLL_PHASE_PULL_IN_3 0xc898 +#define DPLL_PHASE_PULL_IN_4 0xc8a0 +#define DPLL_PHASE_PULL_IN_5 0xc8a8 +#define DPLL_PHASE_PULL_IN_6 0xc8b0 +#define DPLL_PHASE_PULL_IN_7 0xc8b8 + +#define GPIO_CFG 0xc8c0 +#define GPIO_CFG_GBL 0x0000 +#define GPIO_0 0xc8c2 +#define GPIO_DCO_INC_DEC 0x0000 +#define GPIO_OUT_CTRL_0 0x0001 +#define GPIO_OUT_CTRL_1 0x0002 +#define GPIO_TOD_TRIG 0x0003 +#define GPIO_DPLL_INDICATOR 0x0004 +#define GPIO_LOS_INDICATOR 0x0005 +#define GPIO_REF_INPUT_DSQ_0 0x0006 +#define GPIO_REF_INPUT_DSQ_1 0x0007 +#define GPIO_REF_INPUT_DSQ_2 0x0008 +#define GPIO_REF_INPUT_DSQ_3 0x0009 +#define GPIO_MAN_CLK_SEL_0 0x000a +#define GPIO_MAN_CLK_SEL_1 0x000b +#define GPIO_MAN_CLK_SEL_2 0x000c +#define GPIO_SLAVE 0x000d +#define GPIO_ALERT_OUT_CFG 0x000e +#define GPIO_TOD_NOTIFICATION_CFG 0x000f +#define GPIO_CTRL 0x0010 +#define GPIO_CTRL_V520 0x0011 +#define GPIO_1 0xc8d4 +#define GPIO_2 0xc8e6 +#define GPIO_3 0xc900 +#define GPIO_4 0xc912 +#define GPIO_5 0xc924 +#define GPIO_6 0xc936 +#define GPIO_7 0xc948 +#define GPIO_8 0xc95a +#define GPIO_9 0xc980 +#define GPIO_10 0xc992 +#define GPIO_11 0xc9a4 +#define GPIO_12 0xc9b6 +#define GPIO_13 0xc9c8 +#define GPIO_14 0xc9da +#define GPIO_15 0xca00 + +#define OUT_DIV_MUX 0xca12 +#define OUTPUT_0 0xca14 +#define OUTPUT_0_V520 0xca20 +/* FOD frequency output divider value */ +#define OUT_DIV 0x0000 +#define OUT_DUTY_CYCLE_HIGH 0x0004 +#define OUT_CTRL_0 0x0008 +#define OUT_CTRL_1 0x0009 +/* Phase adjustment in FOD cycles */ +#define OUT_PHASE_ADJ 0x000c +#define OUTPUT_1 0xca24 +#define OUTPUT_1_V520 0xca30 +#define OUTPUT_2 0xca34 +#define OUTPUT_2_V520 0xca40 +#define OUTPUT_3 0xca44 +#define OUTPUT_3_V520 0xca50 +#define OUTPUT_4 0xca54 +#define OUTPUT_4_V520 0xca60 +#define OUTPUT_5 0xca64 +#define OUTPUT_5_V520 0xca80 +#define OUTPUT_6 0xca80 +#define OUTPUT_6_V520 0xca90 +#define OUTPUT_7 0xca90 +#define OUTPUT_7_V520 0xcaa0 +#define OUTPUT_8 0xcaa0 +#define OUTPUT_8_V520 0xcab0 +#define OUTPUT_9 0xcab0 +#define OUTPUT_9_V520 0xcac0 +#define OUTPUT_10 0xcac0 +#define OUTPUT_10_V520 0xcad0 +#define OUTPUT_11 0xcad0 +#define OUTPUT_11_V520 0xcae0 + +#define SERIAL 0xcae0 +#define SERIAL_V520 0xcaf0 + +#define PWM_ENCODER_0 0xcb00 +#define PWM_ENCODER_1 0xcb08 +#define PWM_ENCODER_2 0xcb10 +#define PWM_ENCODER_3 0xcb18 +#define PWM_ENCODER_4 0xcb20 +#define PWM_ENCODER_5 0xcb28 +#define PWM_ENCODER_6 0xcb30 +#define PWM_ENCODER_7 0xcb38 +#define PWM_DECODER_0 0xcb40 +#define PWM_DECODER_1 0xcb48 +#define PWM_DECODER_1_V520 0xcb4a +#define PWM_DECODER_2 0xcb50 +#define PWM_DECODER_2_V520 0xcb54 +#define PWM_DECODER_3 0xcb58 +#define PWM_DECODER_3_V520 0xcb5e +#define PWM_DECODER_4 0xcb60 
+#define PWM_DECODER_4_V520 0xcb68 +#define PWM_DECODER_5 0xcb68 +#define PWM_DECODER_5_V520 0xcb80 +#define PWM_DECODER_6 0xcb70 +#define PWM_DECODER_6_V520 0xcb8a +#define PWM_DECODER_7 0xcb80 +#define PWM_DECODER_7_V520 0xcb94 +#define PWM_DECODER_8 0xcb88 +#define PWM_DECODER_8_V520 0xcb9e +#define PWM_DECODER_9 0xcb90 +#define PWM_DECODER_9_V520 0xcba8 +#define PWM_DECODER_10 0xcb98 +#define PWM_DECODER_10_V520 0xcbb2 +#define PWM_DECODER_11 0xcba0 +#define PWM_DECODER_11_V520 0xcbbc +#define PWM_DECODER_12 0xcba8 +#define PWM_DECODER_12_V520 0xcbc6 +#define PWM_DECODER_13 0xcbb0 +#define PWM_DECODER_13_V520 0xcbd0 +#define PWM_DECODER_14 0xcbb8 +#define PWM_DECODER_14_V520 0xcbda +#define PWM_DECODER_15 0xcbc0 +#define PWM_DECODER_15_V520 0xcbe4 +#define PWM_USER_DATA 0xcbc8 +#define PWM_USER_DATA_V520 0xcbf0 + +#define TOD_0 0xcbcc +#define TOD_0_V520 0xcc00 +/* Enable TOD counter, output channel sync and even-PPS mode */ +#define TOD_CFG 0x0000 +#define TOD_CFG_V520 0x0001 +#define TOD_1 0xcbce +#define TOD_1_V520 0xcc02 +#define TOD_2 0xcbd0 +#define TOD_2_V520 0xcc04 +#define TOD_3 0xcbd2 +#define TOD_3_V520 0xcc06 + +#define TOD_WRITE_0 0xcc00 +#define TOD_WRITE_0_V520 0xcc10 +/* 8-bit subns, 32-bit ns, 48-bit seconds */ +#define TOD_WRITE 0x0000 +/* Counter increments after TOD write is completed */ +#define TOD_WRITE_COUNTER 0x000c +/* TOD write trigger configuration */ +#define TOD_WRITE_SELECT_CFG_0 0x000d +/* TOD write trigger selection */ +#define TOD_WRITE_CMD 0x000f +#define TOD_WRITE_1 0xcc10 +#define TOD_WRITE_1_V520 0xcc20 +#define TOD_WRITE_2 0xcc20 +#define TOD_WRITE_2_V520 0xcc30 +#define TOD_WRITE_3 0xcc30 +#define TOD_WRITE_3_V520 0xcc40 + +#define TOD_READ_PRIMARY_0 0xcc40 +#define TOD_READ_PRIMARY_0_V520 0xcc50 +/* 8-bit subns, 32-bit ns, 48-bit seconds */ +#define TOD_READ_PRIMARY 0x0000 +/* Counter increments after TOD write is completed */ +#define TOD_READ_PRIMARY_COUNTER 0x000b +/* Read trigger configuration */ +#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c +/* Read trigger selection */ +#define TOD_READ_PRIMARY_CMD 0x000e +#define TOD_READ_PRIMARY_CMD_V520 0x000f +#define TOD_READ_PRIMARY_1 0xcc50 +#define TOD_READ_PRIMARY_1_V520 0xcc60 +#define TOD_READ_PRIMARY_2 0xcc60 +#define TOD_READ_PRIMARY_2_V520 0xcc80 +#define TOD_READ_PRIMARY_3 0xcc80 +#define TOD_READ_PRIMARY_3_V520 0xcc90 + +#define TOD_READ_SECONDARY_0 0xcc90 +#define TOD_READ_SECONDARY_0_V520 0xcca0 +#define TOD_READ_SECONDARY_1 0xcca0 +#define TOD_READ_SECONDARY_1_V520 0xccb0 +#define TOD_READ_SECONDARY_2 0xccb0 +#define TOD_READ_SECONDARY_2_V520 0xccc0 +#define TOD_READ_SECONDARY_3 0xccc0 +#define TOD_READ_SECONDARY_3_V520 0xccd0 + +#define OUTPUT_TDC_CFG 0xccd0 +#define OUTPUT_TDC_CFG_V520 0xcce0 +#define OUTPUT_TDC_0 0xcd00 +#define OUTPUT_TDC_1 0xcd08 +#define OUTPUT_TDC_2 0xcd10 +#define OUTPUT_TDC_3 0xcd18 +#define INPUT_TDC 0xcd20 + +#define SCRATCH 0xcf50 +#define SCRATCH_V520 0xcf4c + +#define EEPROM 0xcf68 +#define EEPROM_V520 0xcf64 + +#define OTP 0xcf70 + +#define BYTE 0xcf80 + +/* Bit definitions for the MAJ_REL register */ +#define MAJOR_SHIFT (1) +#define MAJOR_MASK (0x7f) +#define PR_BUILD BIT(0) + +/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */ +#define GPIO0_LEVEL BIT(0) +#define GPIO1_LEVEL BIT(1) +#define GPIO2_LEVEL BIT(2) +#define GPIO3_LEVEL BIT(3) +#define GPIO4_LEVEL BIT(4) +#define GPIO5_LEVEL BIT(5) +#define GPIO6_LEVEL BIT(6) +#define GPIO7_LEVEL BIT(7) + +/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */ +#define GPIO8_LEVEL BIT(0) 
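A hedged sketch of the time-of-day read sequence implied by the TOD_READ_PRIMARY block above: arm a read trigger through the CMD register, then read the 11-byte sub-ns/ns/seconds snapshot. The bulk-read flow is an assumption; the SCSR_TOD_READ_TRIG_SEL_IMMEDIATE trigger encoding appears further down in this header.

/* Hypothetical immediate TOD read on channel 0; assumes <linux/regmap.h>. */
static int idt8a340_read_tod0(struct regmap *regmap, u8 buf[11])
{
	int err;

	/* Latch the current TOD immediately. */
	err = regmap_write(regmap, TOD_READ_PRIMARY_0 + TOD_READ_PRIMARY_CMD,
			   SCSR_TOD_READ_TRIG_SEL_IMMEDIATE);
	if (err)
		return err;

	/* 8-bit sub-ns + 32-bit ns + 48-bit seconds = 11 bytes. */
	return regmap_bulk_read(regmap, TOD_READ_PRIMARY_0 + TOD_READ_PRIMARY,
				buf, 11);
}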
+#define GPIO9_LEVEL BIT(1) +#define GPIO10_LEVEL BIT(2) +#define GPIO11_LEVEL BIT(3) +#define GPIO12_LEVEL BIT(4) +#define GPIO13_LEVEL BIT(5) +#define GPIO14_LEVEL BIT(6) +#define GPIO15_LEVEL BIT(7) + +/* Bit definitions for the GPIO0_TO_7_OUT register */ +#define GPIO0_DRIVE_LEVEL BIT(0) +#define GPIO1_DRIVE_LEVEL BIT(1) +#define GPIO2_DRIVE_LEVEL BIT(2) +#define GPIO3_DRIVE_LEVEL BIT(3) +#define GPIO4_DRIVE_LEVEL BIT(4) +#define GPIO5_DRIVE_LEVEL BIT(5) +#define GPIO6_DRIVE_LEVEL BIT(6) +#define GPIO7_DRIVE_LEVEL BIT(7) + +/* Bit definitions for the GPIO8_TO_15_OUT register */ +#define GPIO8_DRIVE_LEVEL BIT(0) +#define GPIO9_DRIVE_LEVEL BIT(1) +#define GPIO10_DRIVE_LEVEL BIT(2) +#define GPIO11_DRIVE_LEVEL BIT(3) +#define GPIO12_DRIVE_LEVEL BIT(4) +#define GPIO13_DRIVE_LEVEL BIT(5) +#define GPIO14_DRIVE_LEVEL BIT(6) +#define GPIO15_DRIVE_LEVEL BIT(7) + +/* Bit definitions for the DPLL_TOD_SYNC_CFG register */ +#define TOD_SYNC_SOURCE_SHIFT (1) +#define TOD_SYNC_SOURCE_MASK (0x3) +#define TOD_SYNC_EN BIT(0) + +/* Bit definitions for the DPLL_MODE register */ +#define WRITE_TIMER_MODE BIT(6) +#define PLL_MODE_SHIFT (3) +#define PLL_MODE_MASK (0x7) +#define STATE_MODE_SHIFT (0) +#define STATE_MODE_MASK (0x7) + +/* Bit definitions for the GPIO_CFG_GBL register */ +#define SUPPLY_MODE_SHIFT (0) +#define SUPPLY_MODE_MASK (0x3) + +/* Bit definitions for the GPIO_DCO_INC_DEC register */ +#define INCDEC_DPLL_INDEX_SHIFT (0) +#define INCDEC_DPLL_INDEX_MASK (0x7) + +/* Bit definitions for the GPIO_OUT_CTRL_0 register */ +#define CTRL_OUT_0 BIT(0) +#define CTRL_OUT_1 BIT(1) +#define CTRL_OUT_2 BIT(2) +#define CTRL_OUT_3 BIT(3) +#define CTRL_OUT_4 BIT(4) +#define CTRL_OUT_5 BIT(5) +#define CTRL_OUT_6 BIT(6) +#define CTRL_OUT_7 BIT(7) + +/* Bit definitions for the GPIO_OUT_CTRL_1 register */ +#define CTRL_OUT_8 BIT(0) +#define CTRL_OUT_9 BIT(1) +#define CTRL_OUT_10 BIT(2) +#define CTRL_OUT_11 BIT(3) +#define CTRL_OUT_12 BIT(4) +#define CTRL_OUT_13 BIT(5) +#define CTRL_OUT_14 BIT(6) +#define CTRL_OUT_15 BIT(7) + +/* Bit definitions for the GPIO_TOD_TRIG register */ +#define TOD_TRIG_0 BIT(0) +#define TOD_TRIG_1 BIT(1) +#define TOD_TRIG_2 BIT(2) +#define TOD_TRIG_3 BIT(3) + +/* Bit definitions for the GPIO_DPLL_INDICATOR register */ +#define IND_DPLL_INDEX_SHIFT (0) +#define IND_DPLL_INDEX_MASK (0x7) + +/* Bit definitions for the GPIO_LOS_INDICATOR register */ +#define REFMON_INDEX_SHIFT (0) +#define REFMON_INDEX_MASK (0xf) +/* Active level of LOS indicator, 0=low 1=high */ +#define ACTIVE_LEVEL BIT(4) + +/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */ +#define DSQ_INP_0 BIT(0) +#define DSQ_INP_1 BIT(1) +#define DSQ_INP_2 BIT(2) +#define DSQ_INP_3 BIT(3) +#define DSQ_INP_4 BIT(4) +#define DSQ_INP_5 BIT(5) +#define DSQ_INP_6 BIT(6) +#define DSQ_INP_7 BIT(7) + +/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */ +#define DSQ_INP_8 BIT(0) +#define DSQ_INP_9 BIT(1) +#define DSQ_INP_10 BIT(2) +#define DSQ_INP_11 BIT(3) +#define DSQ_INP_12 BIT(4) +#define DSQ_INP_13 BIT(5) +#define DSQ_INP_14 BIT(6) +#define DSQ_INP_15 BIT(7) + +/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */ +#define DSQ_DPLL_0 BIT(0) +#define DSQ_DPLL_1 BIT(1) +#define DSQ_DPLL_2 BIT(2) +#define DSQ_DPLL_3 BIT(3) +#define DSQ_DPLL_4 BIT(4) +#define DSQ_DPLL_5 BIT(5) +#define DSQ_DPLL_6 BIT(6) +#define DSQ_DPLL_7 BIT(7) + +/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */ +#define DSQ_DPLL_SYS BIT(0) +#define GPIO_DSQ_LEVEL BIT(1) + +/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */ 
+#define DPLL_TOD_SHIFT (0) +#define DPLL_TOD_MASK (0x3) +#define TOD_READ_SECONDARY BIT(2) +#define GPIO_ASSERT_LEVEL BIT(3) + +/* Bit definitions for the GPIO_CTRL register */ +#define GPIO_FUNCTION_EN BIT(0) +#define GPIO_CMOS_OD_MODE BIT(1) +#define GPIO_CONTROL_DIR BIT(2) +#define GPIO_PU_PD_MODE BIT(3) +#define GPIO_FUNCTION_SHIFT (4) +#define GPIO_FUNCTION_MASK (0xf) + +/* Bit definitions for the OUT_CTRL_1 register */ +#define OUT_SYNC_DISABLE BIT(7) +#define SQUELCH_VALUE BIT(6) +#define SQUELCH_DISABLE BIT(5) +#define PAD_VDDO_SHIFT (2) +#define PAD_VDDO_MASK (0x7) +#define PAD_CMOSDRV_SHIFT (0) +#define PAD_CMOSDRV_MASK (0x3) + +/* Bit definitions for the TOD_CFG register */ +#define TOD_EVEN_PPS_MODE BIT(2) +#define TOD_OUT_SYNC_ENABLE BIT(1) +#define TOD_ENABLE BIT(0) + +/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */ +#define WR_PWM_DECODER_INDEX_SHIFT (4) +#define WR_PWM_DECODER_INDEX_MASK (0xf) +#define WR_REF_INDEX_SHIFT (0) +#define WR_REF_INDEX_MASK (0xf) + +/* Bit definitions for the TOD_WRITE_CMD register */ +#define TOD_WRITE_SELECTION_SHIFT (0) +#define TOD_WRITE_SELECTION_MASK (0xf) +/* 4.8.7 */ +#define TOD_WRITE_TYPE_SHIFT (4) +#define TOD_WRITE_TYPE_MASK (0x3) + +/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */ +#define RD_PWM_DECODER_INDEX_SHIFT (4) +#define RD_PWM_DECODER_INDEX_MASK (0xf) +#define RD_REF_INDEX_SHIFT (0) +#define RD_REF_INDEX_MASK (0xf) + +/* Bit definitions for the TOD_READ_PRIMARY_CMD register */ +#define TOD_READ_TRIGGER_MODE BIT(4) +#define TOD_READ_TRIGGER_SHIFT (0) +#define TOD_READ_TRIGGER_MASK (0xf) + +/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */ +#define COMBO_MASTER_HOLD BIT(0) + +/* Bit definitions for DPLL_SYS_STATUS register */ +#define DPLL_SYS_STATE_MASK (0xf) + +/* Bit definitions for SYS_APLL_STATUS register */ +#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0) +#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0 +#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1 + +/* Bit definitions for the DPLL0_STATUS register */ +#define DPLL_STATE_MASK (0xf) +#define DPLL_STATE_SHIFT (0x0) + +/* Values of DPLL_N.DPLL_MODE.PLL_MODE */ +enum pll_mode { + PLL_MODE_MIN = 0, + PLL_MODE_NORMAL = PLL_MODE_MIN, + PLL_MODE_WRITE_PHASE = 1, + PLL_MODE_WRITE_FREQUENCY = 2, + PLL_MODE_GPIO_INC_DEC = 3, + PLL_MODE_SYNTHESIS = 4, + PLL_MODE_PHASE_MEASUREMENT = 5, + PLL_MODE_DISABLED = 6, + PLL_MODE_MAX = PLL_MODE_DISABLED, +}; + +enum hw_tod_write_trig_sel { + HW_TOD_WR_TRIG_SEL_MIN = 0, + HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN, + HW_TOD_WR_TRIG_SEL_RESERVED = 1, + HW_TOD_WR_TRIG_SEL_TOD_PPS = 2, + HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3, + HW_TOD_WR_TRIG_SEL_PWM_PPS = 4, + HW_TOD_WR_TRIG_SEL_GPIO = 5, + HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6, + WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC, +}; + +enum scsr_read_trig_sel { + /* CANCEL CURRENT TOD READ; MODULE BECOMES IDLE - NO TRIGGER OCCURS */ + SCSR_TOD_READ_TRIG_SEL_DISABLE = 0, + /* TRIGGER IMMEDIATELY */ + SCSR_TOD_READ_TRIG_SEL_IMMEDIATE = 1, + /* TRIGGER ON RISING EDGE OF INTERNAL TOD PPS SIGNAL */ + SCSR_TOD_READ_TRIG_SEL_TODPPS = 2, + /* TRGGER ON RISING EDGE OF SELECTED REFERENCE INPUT */ + SCSR_TOD_READ_TRIG_SEL_REFCLK = 3, + /* TRIGGER ON RISING EDGE OF SELECTED PWM DECODER 1PPS OUTPUT */ + SCSR_TOD_READ_TRIG_SEL_PWMPPS = 4, + SCSR_TOD_READ_TRIG_SEL_RESERVED = 5, + /* TRIGGER WHEN WRITE FREQUENCY EVENT OCCURS */ + SCSR_TOD_READ_TRIG_SEL_WRITEFREQUENCYEVENT = 6, + /* TRIGGER ON SELECTED GPIO */ + SCSR_TOD_READ_TRIG_SEL_GPIO = 7, + SCSR_TOD_READ_TRIG_SEL_MAX = 
SCSR_TOD_READ_TRIG_SEL_GPIO, +}; + +/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */ +enum dpll_state { + DPLL_STATE_MIN = 0, + DPLL_STATE_FREERUN = DPLL_STATE_MIN, + DPLL_STATE_LOCKACQ = 1, + DPLL_STATE_LOCKREC = 2, + DPLL_STATE_LOCKED = 3, + DPLL_STATE_HOLDOVER = 4, + DPLL_STATE_OPEN_LOOP = 5, + DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP, +}; + +/* 4.8.7 only */ +enum scsr_tod_write_trig_sel { + SCSR_TOD_WR_TRIG_SEL_DISABLE = 0, + SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1, + SCSR_TOD_WR_TRIG_SEL_REFCLK = 2, + SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3, + SCSR_TOD_WR_TRIG_SEL_TODPPS = 4, + SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5, + SCSR_TOD_WR_TRIG_SEL_GPIO = 6, + SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO, +}; + +/* 4.8.7 only */ +enum scsr_tod_write_type_sel { + SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0, + SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1, + SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2, + SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS, +}; +#endif diff --git a/include/linux/mfd/rsmu.h b/include/linux/mfd/rsmu.h new file mode 100644 index 000000000000..6870de608233 --- /dev/null +++ b/include/linux/mfd/rsmu.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Core interface for Renesas Synchronization Management Unit (SMU) devices. + * + * Copyright (C) 2021 Integrated Device Technology, Inc., a Renesas Company. + */ + +#ifndef __LINUX_MFD_RSMU_H +#define __LINUX_MFD_RSMU_H + +/* The supported devices are ClockMatrix, Sabre and SnowLotus */ +enum rsmu_type { + RSMU_CM = 0x34000, + RSMU_SABRE = 0x33810, + RSMU_SL = 0x19850, +}; + +/** + * + * struct rsmu_ddata - device data structure for sub devices. + * + * @dev: i2c/spi device. + * @regmap: i2c/spi bus access. + * @lock: mutex used by sub devices to make sure a series of + * bus access requests are not interrupted. + * @type: RSMU device type. + * @page: i2c/spi bus driver internal use only. 
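As a usage sketch for the shared state documented here (the struct definition follows just below): a sub-driver typically fetches the parent's rsmu_ddata and holds its mutex across multi-register sequences. The helper below is an illustrative assumption.

/* Hypothetical sub-driver helper: dev->parent is the MFD core device that
 * owns the rsmu_ddata. */
static int rsmu_sub_read_block(struct device *dev, unsigned int reg,
			       u8 *buf, size_t len)
{
	struct rsmu_ddata *rsmu = dev_get_drvdata(dev->parent);
	int err;

	mutex_lock(&rsmu->lock);
	err = regmap_bulk_read(rsmu->regmap, reg, buf, len);
	mutex_unlock(&rsmu->lock);

	return err;
}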
+ */ +struct rsmu_ddata { + struct device *dev; + struct regmap *regmap; + struct mutex lock; + enum rsmu_type type; + u16 page; +}; +#endif /* __LINUX_MFD_RSMU_H */ diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h index 40a0c2dfb80f..2d1895c3efbf 100644 --- a/include/linux/mfd/rt5033-private.h +++ b/include/linux/mfd/rt5033-private.h @@ -200,13 +200,13 @@ enum rt5033_reg { #define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U #define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U #define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U -#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 21 +#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 32 /* RT5033 regulator LDO output voltage uV */ #define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U #define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U #define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U -#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 19 +#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 32 /* RT5033 regulator SAFE LDO output voltage uV */ #define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 944aa3aa3035..723985879035 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -303,6 +303,7 @@ struct mhi_controller_config { * @rddm_size: RAM dump size that host should allocate for debugging purpose * @sbl_size: SBL image size downloaded through BHIe (optional) * @seg_len: BHIe vector size (optional) + * @reg_len: Length of the MHI MMIO region (required) * @fbc_image: Points to firmware image buffer * @rddm_image: Points to RAM dump buffer * @mhi_chan: Points to the channel configuration table @@ -356,6 +357,7 @@ struct mhi_controller_config { * @fbc_download: MHI host needs to do complete image transfer (optional) * @wake_set: Device wakeup set flag * @irq_flags: irq flags passed to request_irq (optional) + * @mru: the default MRU for the MHI device * * Fields marked as (required) need to be populated by the controller driver * before calling mhi_register_controller(). 
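For the new reg_len and mru members noted above, a minimal controller-side sketch; the surrounding probe flow, the BAR length and the 3500-byte MRU default are assumptions, only the field names and mhi_register_controller() come from this header.

/* Hypothetical probe fragment: fill the required MMIO fields, including the
 * new reg_len, plus the default MRU, before registering the controller. */
static int my_mhi_register(struct mhi_controller *mhi_cntrl,
			   void __iomem *regs, resource_size_t bar_len,
			   const struct mhi_controller_config *cfg)
{
	mhi_cntrl->regs = regs;
	mhi_cntrl->reg_len = bar_len;	/* length of the MHI MMIO region */
	mhi_cntrl->mru = 3500;		/* assumed default MRU, in bytes */

	return mhi_register_controller(mhi_cntrl, cfg);
}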
For the fields marked as (optional) @@ -386,6 +388,7 @@ struct mhi_controller { size_t rddm_size; size_t sbl_size; size_t seg_len; + size_t reg_len; struct image_info *fbc_image; struct image_info *rddm_image; struct mhi_chan *mhi_chan; @@ -448,6 +451,7 @@ struct mhi_controller { bool fbc_download; bool wake_set; unsigned long irq_flags; + u32 mru; }; /** diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 23dadf7aeba8..326250996b4e 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -28,6 +28,7 @@ enum migrate_reason { MR_NUMA_MISPLACED, MR_CONTIG_RANGE, MR_LONGTERM_PIN, + MR_DEMOTION, MR_TYPES }; @@ -41,7 +42,8 @@ extern int migrate_page(struct address_space *mapping, struct page *newpage, struct page *page, enum migrate_mode mode); extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, - unsigned long private, enum migrate_mode mode, int reason); + unsigned long private, enum migrate_mode mode, int reason, + unsigned int *ret_succeeded); extern struct page *alloc_migration_target(struct page *page, unsigned long private); extern int isolate_movable_page(struct page *page, isolate_mode_t mode); @@ -56,7 +58,7 @@ extern int migrate_page_move_mapping(struct address_space *mapping, static inline void putback_movable_pages(struct list_head *l) {} static inline int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, unsigned long private, enum migrate_mode mode, - int reason) + int reason, unsigned int *ret_succeeded) { return -ENOSYS; } static inline struct page *alloc_migration_target(struct page *page, unsigned long private) @@ -166,6 +168,14 @@ struct migrate_vma { int migrate_vma_setup(struct migrate_vma *args); void migrate_vma_pages(struct migrate_vma *migrate); void migrate_vma_finalize(struct migrate_vma *migrate); +int next_demotion_node(int node); + +#else /* CONFIG_MIGRATION disabled: */ + +static inline int next_demotion_node(int node) +{ + return NUMA_NO_NODE; +} #endif /* CONFIG_MIGRATION */ diff --git a/include/linux/mii.h b/include/linux/mii.h index 219b93cad1dd..12ea29e04293 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -32,7 +32,7 @@ struct mii_if_info { extern int mii_link_ok (struct mii_if_info *mii); extern int mii_nway_restart (struct mii_if_info *mii); -extern int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); +extern void mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); extern void mii_ethtool_get_link_ksettings( struct mii_if_info *mii, struct ethtool_link_ksettings *cmd); extern int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd); diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 0025913505ab..66eaf0aa7f69 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -1038,7 +1038,7 @@ enum { struct mlx5_mkey_seg { /* This is a two bit field occupying bits 31-30. * bit 31 is always 0, - * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation + * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have translation */ u8 status; u8 pcie_control; @@ -1157,6 +1157,9 @@ enum mlx5_cap_mode { HCA_CAP_OPMOD_GET_CUR = 1, }; +/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate + * capability memory. 
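A hedged sketch of the allocation referred to above, given that the capability macros below now dereference caps.hca[type]->cur / ->max; the loop bound MLX5_CAP_NUM and the error-unwind convention are assumptions not shown in this hunk.

/* Hypothetical per-type capability allocation: one object per mlx5_cap_type,
 * each holding a current and a maximum capability page. */
static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev)
{
	int type;

	for (type = 0; type < MLX5_CAP_NUM; type++) {
		dev->caps.hca[type] = kzalloc(sizeof(*dev->caps.hca[type]),
					      GFP_KERNEL);
		if (!dev->caps.hca[type])
			return -ENOMEM; /* caller assumed to free earlier types */
	}

	return 0;
}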
+ */ enum mlx5_cap_type { MLX5_CAP_GENERAL = 0, MLX5_CAP_ETHERNET_OFFLOADS, @@ -1213,55 +1216,55 @@ enum mlx5_qcam_feature_groups { /* GET Dev Caps macros */ #define MLX5_CAP_GEN(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) #define MLX5_CAP_GEN_64(mdev, cap) \ - MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) + MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap) #define MLX5_CAP_GEN_MAX(mdev, cap) \ - MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap) + MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap) #define MLX5_CAP_GEN_2(mdev, cap) \ - MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap) + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) #define MLX5_CAP_GEN_2_64(mdev, cap) \ - MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca_cur[MLX5_CAP_GENERAL_2], cap) + MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap) #define MLX5_CAP_GEN_2_MAX(mdev, cap) \ - MLX5_GET(cmd_hca_cap_2, mdev->caps.hca_max[MLX5_CAP_GENERAL_2], cap) + MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap) #define MLX5_CAP_ETH(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap) #define MLX5_CAP_ETH_MAX(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap) + mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap) #define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \ MLX5_GET(per_protocol_networking_offload_caps,\ - mdev->caps.hca_cur[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS], cap) + mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap) #define MLX5_CAP_ROCE(mdev, cap) \ - MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap) #define MLX5_CAP_ROCE_MAX(mdev, cap) \ - MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap) + MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap) #define MLX5_CAP_ATOMIC(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap) #define MLX5_CAP_ATOMIC_MAX(mdev, cap) \ - MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap) + MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap) #define MLX5_CAP_FLOWTABLE(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) #define MLX5_CAP64_FLOWTABLE(mdev, cap) \ - MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap) #define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \ - MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap) + MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap) #define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \ MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap) @@ -1301,11 +1304,11 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) #define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \ MLX5_GET(flow_table_eswitch_cap, \ - 
mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap) #define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \ MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap) @@ -1327,31 +1330,31 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ESW(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap) #define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \ MLX5_GET64(flow_table_eswitch_cap, \ - (mdev)->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap) + (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap) #define MLX5_CAP_ESW_MAX(mdev, cap) \ MLX5_GET(e_switch_cap, \ - mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap) + mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap) #define MLX5_CAP_ODP(mdev, cap)\ - MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) #define MLX5_CAP_ODP_MAX(mdev, cap)\ - MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap) + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap) #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ MLX5_GET(vector_calc_cap, \ - mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) + mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap) #define MLX5_CAP_QOS(mdev, cap)\ - MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap) + MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap) #define MLX5_CAP_DEBUG(mdev, cap)\ - MLX5_GET(debug_cap, mdev->caps.hca_cur[MLX5_CAP_DEBUG], cap) + MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap) #define MLX5_CAP_PCAM_FEATURE(mdev, fld) \ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld) @@ -1387,27 +1390,27 @@ enum mlx5_qcam_feature_groups { MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap) #define MLX5_CAP_DEV_MEM(mdev, cap)\ - MLX5_GET(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) #define MLX5_CAP64_DEV_MEM(mdev, cap)\ - MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) + MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap) #define MLX5_CAP_TLS(mdev, cap) \ - MLX5_GET(tls_cap, (mdev)->caps.hca_cur[MLX5_CAP_TLS], cap) + MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap) #define MLX5_CAP_DEV_EVENT(mdev, cap)\ - MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) + MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap) #define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\ MLX5_GET(virtio_emulation_cap, \ - (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) + (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) #define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\ MLX5_GET64(virtio_emulation_cap, \ - (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap) + (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap) #define MLX5_CAP_IPSEC(mdev, cap)\ - MLX5_GET(ipsec_cap, (mdev)->caps.hca_cur[MLX5_CAP_IPSEC], cap) + MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap) enum { MLX5_CMD_STAT_OK = 0x0, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 1efe37466969..e23417424373 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -581,7 +581,7 @@ struct mlx5_priv { /* end: qp staff */ /* start: alloc staff */ - /* protect buffer alocation according to numa node */ + /* protect buffer allocation according to numa node */ struct mutex alloc_mutex; int numa_node; @@ -623,8 +623,7 @@ struct mlx5_priv 
{ }; enum mlx5_device_state { - MLX5_DEVICE_STATE_UNINITIALIZED, - MLX5_DEVICE_STATE_UP, + MLX5_DEVICE_STATE_UP = 1, MLX5_DEVICE_STATE_INTERNAL_ERROR, }; @@ -730,6 +729,11 @@ struct mlx5_profile { } mr_cache[MAX_MR_CACHE_ENTRIES]; }; +struct mlx5_hca_cap { + u32 cur[MLX5_UN_SZ_DW(hca_cap_union)]; + u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; +}; + struct mlx5_core_dev { struct device *device; enum mlx5_coredev_type coredev_type; @@ -741,8 +745,7 @@ struct mlx5_core_dev { char board_id[MLX5_BOARD_ID_LEN]; struct mlx5_cmd cmd; struct { - u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; - u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)]; + struct mlx5_hca_cap *hca[MLX5_CAP_NUM]; u32 pcam[MLX5_ST_SZ_DW(pcam_reg)]; u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)]; u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; @@ -1044,8 +1047,7 @@ void mlx5_unregister_debugfs(void); void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas); void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm); void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas); -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, - unsigned int *irqn); +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn); int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn); @@ -1111,7 +1113,7 @@ static inline u8 mlx5_mkey_variant(u32 mkey) } /* Async-atomic event notifier used by mlx5 core to forward FW - * evetns recived from event queue to mlx5 consumers. + * evetns received from event queue to mlx5 consumers. * Optimise event queue dipatching. */ int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb); @@ -1138,6 +1140,8 @@ bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev); bool mlx5_lag_is_active(struct mlx5_core_dev *dev); +bool mlx5_lag_is_master(struct mlx5_core_dev *dev); +bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev); struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, struct net_device *slave); @@ -1145,6 +1149,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, size_t *offsets); +struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev); struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev); void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up); int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index bc7db2e059eb..4ab5c1fc1270 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h @@ -29,11 +29,20 @@ enum { REP_LOADED, }; +enum mlx5_switchdev_event { + MLX5_SWITCHDEV_EVENT_PAIR, + MLX5_SWITCHDEV_EVENT_UNPAIR, +}; + struct mlx5_eswitch_rep; struct mlx5_eswitch_rep_ops { int (*load)(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep); void (*unload)(struct mlx5_eswitch_rep *rep); void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); + int (*event)(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + enum mlx5_switchdev_event event, + void *data); }; struct mlx5_eswitch_rep_data { @@ -63,6 +72,7 @@ struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, void 
*mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, + struct mlx5_eswitch *from_esw, struct mlx5_eswitch_rep *rep, u32 sqn); #ifdef CONFIG_MLX5_ESWITCH @@ -128,6 +138,7 @@ u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw, u8 mlx5_eswitch_mode(struct mlx5_core_dev *dev); u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev); +struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw); #else /* CONFIG_MLX5_ESWITCH */ @@ -171,6 +182,11 @@ static inline u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) return 0; } +static inline struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw) +{ + return NULL; +} + #endif /* CONFIG_MLX5_ESWITCH */ static inline bool is_mdev_switchdev_mode(struct mlx5_core_dev *dev) diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 77746f7e35b8..0106c67e8ccb 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -38,6 +38,8 @@ #define MLX5_FS_DEFAULT_FLOW_TAG 0x0 +#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) + enum { MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO = 1 << 16, MLX5_FLOW_CONTEXT_ACTION_ENCRYPT = 1 << 17, diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index b0009aa3647f..f3638d09ba77 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -865,7 +865,8 @@ struct mlx5_ifc_qos_cap_bits { u8 nic_bw_share[0x1]; u8 nic_rate_limit[0x1]; u8 packet_pacing_uid[0x1]; - u8 reserved_at_c[0x14]; + u8 log_esw_max_sched_depth[0x4]; + u8 reserved_at_10[0x10]; u8 reserved_at_20[0xb]; u8 log_max_qos_nic_queue_group[0x5]; @@ -921,7 +922,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 scatter_fcs[0x1]; u8 enhanced_multi_pkt_send_wqe[0x1]; u8 tunnel_lso_const_out_ip_id[0x1]; - u8 reserved_at_1c[0x2]; + u8 tunnel_lro_gre[0x1]; + u8 tunnel_lro_vxlan[0x1]; u8 tunnel_stateless_gre[0x1]; u8 tunnel_stateless_vxlan[0x1]; @@ -1651,7 +1653,13 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 max_geneve_tlv_option_data_len[0x5]; u8 reserved_at_570[0x10]; - u8 reserved_at_580[0x33]; + u8 reserved_at_580[0xb]; + u8 log_max_dci_stream_channels[0x5]; + u8 reserved_at_590[0x3]; + u8 log_max_dci_errored_streams[0x5]; + u8 reserved_at_598[0x8]; + + u8 reserved_at_5a0[0x13]; u8 log_max_dek[0x5]; u8 reserved_at_5b8[0x4]; u8 mini_cqe_resp_stride_index[0x1]; @@ -3020,10 +3028,12 @@ struct mlx5_ifc_qpc_bits { u8 reserved_at_3c0[0x8]; u8 next_send_psn[0x18]; - u8 reserved_at_3e0[0x8]; + u8 reserved_at_3e0[0x3]; + u8 log_num_dci_stream_channels[0x5]; u8 cqn_snd[0x18]; - u8 reserved_at_400[0x8]; + u8 reserved_at_400[0x3]; + u8 log_num_dci_errored_streams[0x5]; u8 deth_sqpn[0x18]; u8 reserved_at_420[0x20]; @@ -3911,7 +3921,7 @@ struct mlx5_ifc_cqc_bits { u8 status[0x4]; u8 reserved_at_4[0x2]; u8 dbr_umem_valid[0x1]; - u8 apu_thread_cq[0x1]; + u8 apu_cq[0x1]; u8 cqe_sz[0x3]; u8 cc[0x1]; u8 reserved_at_c[0x1]; @@ -3937,8 +3947,7 @@ struct mlx5_ifc_cqc_bits { u8 cq_period[0xc]; u8 cq_max_count[0x10]; - u8 reserved_at_a0[0x18]; - u8 c_eqn[0x8]; + u8 c_eqn_or_apu_element[0x20]; u8 reserved_at_c0[0x3]; u8 log_page_size[0x5]; diff --git a/include/linux/mlx5/mlx5_ifc_vdpa.h b/include/linux/mlx5/mlx5_ifc_vdpa.h index 98b56b75c625..1a9c9d94cb59 100644 --- a/include/linux/mlx5/mlx5_ifc_vdpa.h +++ b/include/linux/mlx5/mlx5_ifc_vdpa.h @@ -11,13 +11,15 @@ enum { }; enum { - 
MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, // do I check this caps? - MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, }; enum { - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, - MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT), + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = + BIT(MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED), }; struct mlx5_ifc_virtio_q_bits { diff --git a/include/linux/mm.h b/include/linux/mm.h index 7ca22e6e694a..73a52aba448f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -281,7 +281,6 @@ extern unsigned int kobjsize(const void *objp); #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ #define VM_UFFD_MISSING 0x00000200 /* missing pages tracking */ #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ -#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ #define VM_UFFD_WP 0x00001000 /* wrprotect pages tracking */ #define VM_LOCKED 0x00002000 @@ -829,6 +828,8 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags) return kvmalloc_array(n, size, flags | __GFP_ZERO); } +extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, + gfp_t flags); extern void kvfree(const void *addr); extern void kvfree_sensitive(const void *addr, size_t len); @@ -1214,8 +1215,8 @@ static inline void get_page(struct page *page) } bool __must_check try_grab_page(struct page *page, unsigned int flags); -__maybe_unused struct page *try_grab_compound_head(struct page *page, int refs, - unsigned int flags); +struct page *try_grab_compound_head(struct page *page, int refs, + unsigned int flags); static inline __must_check bool try_get_page(struct page *page) @@ -1847,7 +1848,6 @@ int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc, struct kvec; int get_kernel_pages(const struct kvec *iov, int nr_pages, int write, struct page **pages); -int get_kernel_page(unsigned long start, int write, struct page **pages); struct page *get_dump_page(unsigned long addr); extern int try_to_release_page(struct page * page, gfp_t gfp_mask); @@ -2580,7 +2580,8 @@ static inline int check_data_rlimit(unsigned long rlim, extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); -extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); +extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); extern struct file *get_mm_exe_file(struct mm_struct *mm); extern struct file *get_task_exe_file(struct task_struct *task); @@ -3119,7 +3120,7 @@ extern void memory_failure_queue_kick(int cpu); extern int unpoison_memory(unsigned long pfn); extern int sysctl_memory_failure_early_kill; extern int sysctl_memory_failure_recovery; -extern void shake_page(struct page *p, int access); +extern void shake_page(struct page *p); extern atomic_long_t num_poisoned_pages __read_mostly; extern int soft_offline_page(unsigned long pfn, int flags); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 52bbd2b7cb46..7f8ee09c711f 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -103,11 +103,19 @@ struct page { unsigned long pp_magic; struct page_pool *pp; unsigned long 
_pp_mapping_pad; - /** - * @dma_addr: might require a 64-bit value on - * 32-bit architectures. - */ - unsigned long dma_addr[2]; + unsigned long dma_addr; + union { + /** + * dma_addr_upper: might require a 64-bit + * value on 32-bit architectures. + */ + unsigned long dma_addr_upper; + /** + * For frag page support, not supported in + * 32-bit architectures with 64-bit DMA. + */ + atomic_long_t pp_frag_count; + }; }; struct { /* slab, slob and slub */ union { diff --git a/include/linux/mman.h b/include/linux/mman.h index ebb09a964272..b66e91b8176c 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -32,7 +32,8 @@ * The historical set of flags that all mmap implementations implicitly * support when a ->mmap_validate() op is not provided in file_operations. * - * MAP_EXECUTABLE is completely ignored throughout the kernel. + * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the + * kernel. */ #define LEGACY_MAP_MASK (MAP_SHARED \ | MAP_PRIVATE \ @@ -153,7 +154,6 @@ static inline unsigned long calc_vm_flag_bits(unsigned long flags) { return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | - _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) | _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED ) | _calc_vm_trans(flags, MAP_SYNC, VM_SYNC ) | arch_calc_vm_flag_bits(flags); diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 0540f0156f58..b179f1e3541a 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -101,14 +101,14 @@ static inline bool mmap_write_trylock(struct mm_struct *mm) static inline void mmap_write_unlock(struct mm_struct *mm) { - up_write(&mm->mmap_lock); __mmap_lock_trace_released(mm, true); + up_write(&mm->mmap_lock); } static inline void mmap_write_downgrade(struct mm_struct *mm) { - downgrade_write(&mm->mmap_lock); __mmap_lock_trace_acquire_returned(mm, false, true); + downgrade_write(&mm->mmap_lock); } static inline void mmap_read_lock(struct mm_struct *mm) @@ -140,8 +140,8 @@ static inline bool mmap_read_trylock(struct mm_struct *mm) static inline void mmap_read_unlock(struct mm_struct *mm) { - up_read(&mm->mmap_lock); __mmap_lock_trace_released(mm, false); + up_read(&mm->mmap_lock); } static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) @@ -155,8 +155,8 @@ static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm) static inline void mmap_read_unlock_non_owner(struct mm_struct *mm) { - up_read_non_owner(&mm->mmap_lock); __mmap_lock_trace_released(mm, false); + up_read_non_owner(&mm->mmap_lock); } static inline void mmap_assert_locked(struct mm_struct *mm) diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index 74e6c0624d27..37f975875102 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h @@ -109,6 +109,7 @@ struct mmc_ext_csd { u8 raw_hc_erase_gap_size; /* 221 */ u8 raw_erase_timeout_mult; /* 223 */ u8 raw_hc_erase_grp_size; /* 224 */ + u8 raw_boot_mult; /* 226 */ u8 raw_sec_trim_mult; /* 229 */ u8 raw_sec_erase_mult; /* 230 */ u8 raw_sec_feature_support;/* 231 */ diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index ab19245e9945..71101d1ec825 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -164,9 +164,8 @@ struct mmc_request { int tag; #ifdef CONFIG_MMC_CRYPTO - bool crypto_enabled; + const struct bio_crypt_ctx *crypto_ctx; int crypto_key_slot; - u32 data_unit_num; #endif }; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 0abd47e9ef9b..0c0c9a0fdf57 100644 --- a/include/linux/mmc/host.h 
+++ b/include/linux/mmc/host.h @@ -153,7 +153,7 @@ struct mmc_host_ops { int (*start_signal_voltage_switch)(struct mmc_host *host, struct mmc_ios *ios); - /* Check if the card is pulling dat[0:3] low */ + /* Check if the card is pulling dat[0] low */ int (*card_busy)(struct mmc_host *host); /* The tuning command opcode value is different for SD and eMMC cards */ @@ -398,6 +398,7 @@ struct mmc_host { #else #define MMC_CAP2_CRYPTO 0 #endif +#define MMC_CAP2_ALT_GPT_TEGRA (1 << 28) /* Host with eMMC that has GPT entry at a non-standard location */ int fixed_drv_type; /* fixed driver type for non-removable media */ diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 12036619346c..a85c9f0bd470 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h @@ -75,6 +75,7 @@ #define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf +#define SDIO_DEVICE_ID_BROADCOM_CYPRESS_43752 0xaae8 #define SDIO_VENDOR_ID_MARVELL 0x02df #define SDIO_DEVICE_ID_MARVELL_LIBERTAS 0x9103 diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h index 03dee12d2b61..b9b970f7ab45 100644 --- a/include/linux/mmu_context.h +++ b/include/linux/mmu_context.h @@ -14,4 +14,18 @@ static inline void leave_mm(int cpu) { } #endif +/* + * CPUs that are capable of running user task @p. Must contain at least one + * active CPU. It is assumed that the kernel can run on all CPUs, so calling + * this for a kernel thread is pointless. + * + * By default, we assume a sane, homogeneous system. + */ +#ifndef task_cpu_possible_mask +# define task_cpu_possible_mask(p) cpu_possible_mask +# define task_cpu_possible(cpu, p) true +#else +# define task_cpu_possible(cpu, p) cpumask_test_cpu((cpu), task_cpu_possible_mask(p)) +#endif + #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index fcb535560028..6a1d79d84675 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -540,6 +540,10 @@ struct zone { * is calculated as: * present_pages = spanned_pages - absent_pages(pages in holes); * + * present_early_pages is present pages existing within the zone + * located on memory available since early boot, excluding hotplugged + * memory. 
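Illustrative sketch for the mmu_context.h hunk above: task_cpu_possible_mask() defaults to cpu_possible_mask on homogeneous systems, so generic code can walk only the CPUs a given user task is able to run on. The helper below is hypothetical.

static int demo_pick_allowed_cpu(struct task_struct *p)
{
	int cpu;

	/* on a homogeneous system this simply walks cpu_possible_mask */
	for_each_cpu(cpu, task_cpu_possible_mask(p)) {
		if (cpu_active(cpu))
			return cpu;
	}
	return -ENODEV;
}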
+ * * managed_pages is present pages managed by the buddy system, which * is calculated as (reserved_pages includes pages allocated by the * bootmem allocator): @@ -572,6 +576,9 @@ struct zone { atomic_long_t managed_pages; unsigned long spanned_pages; unsigned long present_pages; +#if defined(CONFIG_MEMORY_HOTPLUG) + unsigned long present_early_pages; +#endif #ifdef CONFIG_CMA unsigned long cma_pages; #endif @@ -846,6 +853,7 @@ typedef struct pglist_data { enum zone_type kcompactd_highest_zoneidx; wait_queue_head_t kcompactd_wait; struct task_struct *kcompactd; + bool proactive_compact_trigger; #endif /* * This is a per-node reserve of pages that are not available @@ -1342,7 +1350,6 @@ static inline struct mem_section *__nr_to_section(unsigned long nr) return NULL; return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]; } -extern unsigned long __section_nr(struct mem_section *ms); extern size_t mem_section_usage_size(void); /* @@ -1365,7 +1372,7 @@ extern size_t mem_section_usage_size(void); #define SECTION_TAINT_ZONE_DEVICE (1UL<<4) #define SECTION_MAP_LAST_BIT (1UL<<5) #define SECTION_MAP_MASK (~(SECTION_MAP_LAST_BIT-1)) -#define SECTION_NID_SHIFT 3 +#define SECTION_NID_SHIFT 6 static inline struct page *__section_mem_map_addr(struct mem_section *section) { @@ -1525,18 +1532,6 @@ void sparse_init(void); #define subsection_map_init(_pfn, _nr_pages) do {} while (0) #endif /* CONFIG_SPARSEMEM */ -/* - * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we - * need to check pfn validity within that MAX_ORDER_NR_PAGES block. - * pfn_valid_within() should be used in this case; we optimise this away - * when we have no holes within a MAX_ORDER_NR_PAGES block. - */ -#ifdef CONFIG_HOLES_IN_ZONE -#define pfn_valid_within(pfn) pfn_valid(pfn) -#else -#define pfn_valid_within(pfn) (1) -#endif - #endif /* !__GENERATING_BOUNDS.H */ #endif /* !__ASSEMBLY__ */ #endif /* _LINUX_MMZONE_H */ diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 8e291cfdaf06..ae2e75d15b21 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h @@ -16,6 +16,10 @@ typedef unsigned long kernel_ulong_t; #define PCI_ANY_ID (~0) +enum { + PCI_ID_F_VFIO_DRIVER_OVERRIDE = 1, +}; + /** * struct pci_device_id - PCI device ID structure * @vendor: Vendor ID to match (or PCI_ANY_ID) @@ -34,12 +38,14 @@ typedef unsigned long kernel_ulong_t; * Best practice is to use driver_data as an index * into a static list of equivalent device types, * instead of using it as a pointer. + * @override_only: Match only when dev->driver_override is this driver. */ struct pci_device_id { __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ __u32 subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ __u32 class, class_mask; /* (class,subclass,prog-if) triplet */ kernel_ulong_t driver_data; /* Data private to the driver */ + __u32 override_only; }; diff --git a/include/linux/module.h b/include/linux/module.h index 8a298d820dbc..c9f1200b2312 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -511,6 +511,11 @@ struct module { struct klp_modinfo *klp_info; #endif +#ifdef CONFIG_PRINTK_INDEX + unsigned int printk_index_size; + struct pi_entry **printk_index_start; +#endif + #ifdef CONFIG_MODULE_UNLOAD /* What modules depend on me? 
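Illustrative sketch for the mod_devicetable.h hunk above: an ID table entry using the new override_only field so the match only fires when userspace has set driver_override to this driver. The vendor/device values below are placeholders.

static const struct pci_device_id demo_pci_ids[] = {
	{ .vendor	 = 0x1234,	/* placeholder vendor ID */
	  .device	 = 0x5678,	/* placeholder device ID */
	  .subvendor	 = PCI_ANY_ID,
	  .subdevice	 = PCI_ANY_ID,
	  .override_only = PCI_ID_F_VFIO_DRIVER_OVERRIDE },
	{ }	/* terminating entry */
};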
*/ struct list_head source_list; diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index eed280fae433..962cd41a2cb5 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -431,6 +431,8 @@ extern int param_get_int(char *buffer, const struct kernel_param *kp); extern const struct kernel_param_ops param_ops_uint; extern int param_set_uint(const char *val, const struct kernel_param *kp); extern int param_get_uint(char *buffer, const struct kernel_param *kp); +int param_set_uint_minmax(const char *val, const struct kernel_param *kp, + unsigned int min, unsigned int max); #define param_check_uint(name, p) __param_check(name, p, unsigned int) extern const struct kernel_param_ops param_ops_long; diff --git a/include/linux/msi.h b/include/linux/msi.h index 6aff469e511d..49cf6eb222e7 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -107,7 +107,8 @@ struct ti_sci_inta_msi_desc { * address or data changes * @write_msi_msg_data: Data parameter for the callback. * - * @masked: [PCI MSI/X] Mask bits + * @msi_mask: [PCI MSI] MSI cached mask bits + * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits * @is_msix: [PCI MSI/X] True if MSI-X * @multiple: [PCI MSI/X] log2 num of messages allocated * @multi_cap: [PCI MSI/X] log2 num of messages supported @@ -139,7 +140,10 @@ struct msi_desc { union { /* PCI MSI/X specific data */ struct { - u32 masked; + union { + u32 msi_mask; + u32 msix_ctrl; + }; struct { u8 is_msix : 1; u8 multiple : 3; @@ -232,11 +236,13 @@ void free_msi_entry(struct msi_desc *entry); void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); -u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); void pci_msi_mask_irq(struct irq_data *data); void pci_msi_unmask_irq(struct irq_data *data); +const struct attribute_group **msi_populate_sysfs(struct device *dev); +void msi_destroy_sysfs(struct device *dev, + const struct attribute_group **msi_irq_groups); + /* * The arch hooks to setup up msi irqs. 
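Illustrative sketch for the moduleparam.h hunk above: the newly exported param_set_uint_minmax() makes it easy to range-check an unsigned module parameter. All names below are hypothetical.

static unsigned int demo_queues = 8;

static int demo_set_queues(const char *val, const struct kernel_param *kp)
{
	/* accept only values in 1..64 */
	return param_set_uint_minmax(val, kp, 1, 64);
}

static const struct kernel_param_ops demo_queues_ops = {
	.set = demo_set_queues,
	.get = param_get_uint,
};
module_param_cb(queues, &demo_queues_ops, &demo_queues, 0644);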
Default functions are implemented * as weak symbols so that they /can/ be overriden by architecture specific diff --git a/include/linux/mutex.h b/include/linux/mutex.h index e19323521f9c..8f226d460f51 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -20,8 +20,17 @@ #include <linux/osq_lock.h> #include <linux/debug_locks.h> -struct ww_class; -struct ww_acquire_ctx; +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT /* * Simple, straightforward mutexes with strict semantics: @@ -53,7 +62,7 @@ struct ww_acquire_ctx; */ struct mutex { atomic_long_t owner; - spinlock_t wait_lock; + raw_spinlock_t wait_lock; #ifdef CONFIG_MUTEX_SPIN_ON_OWNER struct optimistic_spin_queue osq; /* Spinner MCS lock */ #endif @@ -66,27 +75,6 @@ struct mutex { #endif }; -struct ww_mutex { - struct mutex base; - struct ww_acquire_ctx *ctx; -#ifdef CONFIG_DEBUG_MUTEXES - struct ww_class *ww_class; -#endif -}; - -/* - * This is the control structure for tasks blocked on mutex, - * which resides on the blocked task's kernel stack: - */ -struct mutex_waiter { - struct list_head list; - struct task_struct *task; - struct ww_acquire_ctx *ww_ctx; -#ifdef CONFIG_DEBUG_MUTEXES - void *magic; -#endif -}; - #ifdef CONFIG_DEBUG_MUTEXES #define __DEBUG_MUTEX_INITIALIZER(lockname) \ @@ -117,19 +105,9 @@ do { \ __mutex_init((mutex), #mutex, &__key); \ } while (0) -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - #define __MUTEX_INITIALIZER(lockname) \ { .owner = ATOMIC_LONG_INIT(0) \ - , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ + , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ , .wait_list = LIST_HEAD_INIT(lockname.wait_list) \ __DEBUG_MUTEX_INITIALIZER(lockname) \ __DEP_MAP_MUTEX_INITIALIZER(lockname) } @@ -148,6 +126,50 @@ extern void __mutex_init(struct mutex *lock, const char *name, */ extern bool mutex_is_locked(struct mutex *lock); +#else /* !CONFIG_PREEMPT_RT */ +/* + * Preempt-RT variant based on rtmutexes. + */ +#include <linux/rtmutex.h> + +struct mutex { + struct rt_mutex_base rtmutex; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __MUTEX_INITIALIZER(mutexname) \ +{ \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \ + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ +} + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void __mutex_rt_init(struct mutex *lock, const char *name, + struct lock_class_key *key); +extern int mutex_trylock(struct mutex *lock); + +static inline void mutex_destroy(struct mutex *lock) { } + +#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex) + +#define __mutex_init(mutex, name, key) \ +do { \ + rt_mutex_base_init(&(mutex)->rtmutex); \ + __mutex_rt_init((mutex), name, key); \ +} while (0) + +#define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + __mutex_init((mutex), #mutex, &__key); \ +} while (0) +#endif /* CONFIG_PREEMPT_RT */ + /* * See kernel/locking/mutex.c for detailed documentation of these APIs. * Also see Documentation/locking/mutex-design.rst. 
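Illustrative note on the mutex.h rework above: under CONFIG_PREEMPT_RT the mutex is re-implemented on top of rt_mutex_base, but the caller-visible API stays the same, so existing users such as the sketch below build against either variant.

static DEFINE_MUTEX(demo_lock);

static void demo_update(void)
{
	mutex_lock(&demo_lock);
	/* critical section */
	mutex_unlock(&demo_lock);
}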
diff --git a/include/linux/namei.h b/include/linux/namei.h index be9a2b349ca7..e89329bb3134 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -68,6 +68,7 @@ extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int); extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int); +struct dentry *lookup_one(struct user_namespace *, const char *, struct dentry *, int); extern int follow_down_one(struct path *); extern int follow_down(struct path *); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index eaf5bb008aa9..d79163208dfd 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -47,6 +47,7 @@ #include <uapi/linux/if_bonding.h> #include <uapi/linux/pkt_cls.h> #include <linux/hashtable.h> +#include <linux/rbtree.h> struct netpoll_info; struct device; @@ -208,6 +209,7 @@ struct sk_buff; struct netdev_hw_addr { struct list_head list; + struct rb_node node; unsigned char addr[MAX_ADDR_LEN]; unsigned char type; #define NETDEV_HW_ADDR_T_LAN 1 @@ -224,6 +226,9 @@ struct netdev_hw_addr { struct netdev_hw_addr_list { struct list_head list; int count; + + /* Auxiliary tree for faster lookup on addition and deletion */ + struct rb_root tree; }; #define netdev_hw_addr_list_count(l) ((l)->count) @@ -295,18 +300,6 @@ enum netdev_state_t { }; -/* - * This structure holds boot-time configured netdevice settings. They - * are then used in the device probing. - */ -struct netdev_boot_setup { - char name[IFNAMSIZ]; - struct ifmap map; -}; -#define NETDEV_BOOT_SETUP_MAX 8 - -int __init netdev_boot_setup(char *str); - struct gro_list { struct list_head list; int count; @@ -734,13 +727,13 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index, u32 flow_id, /* This structure contains an instance of an RX queue. */ struct netdev_rx_queue { + struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_RPS struct rps_map __rcu *rps_map; struct rps_dev_flow_table __rcu *rps_flow_table; #endif struct kobject kobj; struct net_device *dev; - struct xdp_rxq_info xdp_rxq; #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif @@ -1086,9 +1079,18 @@ struct netdev_net_notifier { * Test if Media Access Control address is valid for the device. * * int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); - * Called when a user requests an ioctl which can't be handled by - * the generic interface code. If not defined ioctls return - * not supported error code. + * Old-style ioctl entry point. This is used internally by the + * appletalk and ieee802154 subsystems but is no longer called by + * the device ioctl handler. + * + * int (*ndo_siocbond)(struct net_device *dev, struct ifreq *ifr, int cmd); + * Used by the bonding driver for its device specific ioctls: + * SIOCBONDENSLAVE, SIOCBONDRELEASE, SIOCBONDSETHWADDR, SIOCBONDCHANGEACTIVE, + * SIOCBONDSLAVEINFOQUERY, and SIOCBONDINFOQUERY + * + * * int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); + * Called for ethernet specific ioctls: SIOCGMIIPHY, SIOCGMIIREG, + * SIOCSMIIREG, SIOCSHWTSTAMP and SIOCGHWTSTAMP. * * int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); * Used to set network devices bus interface parameters. This interface @@ -1321,6 +1323,9 @@ struct netdev_net_notifier { * that got dropped are freed/returned via xdp_return_frame(). 
* Returns negative number, means general error invoking ndo, meaning * no frames were xmit'ed and core-caller will free all frames. + * struct net_device *(*ndo_xdp_get_xmit_slave)(struct net_device *dev, + * struct xdp_buff *xdp); + * Get the xmit slave of master device based on the xdp_buff. * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); * This function is used to wake up the softirq, ksoftirqd or kthread * responsible for sending and/or receiving packets on a specific @@ -1361,6 +1366,15 @@ struct net_device_ops { int (*ndo_validate_addr)(struct net_device *dev); int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd); + int (*ndo_eth_ioctl)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*ndo_siocbond)(struct net_device *dev, + struct ifreq *ifr, int cmd); + int (*ndo_siocwandev)(struct net_device *dev, + struct if_settings *ifs); + int (*ndo_siocdevprivate)(struct net_device *dev, + struct ifreq *ifr, + void __user *data, int cmd); int (*ndo_set_config)(struct net_device *dev, struct ifmap *map); int (*ndo_change_mtu)(struct net_device *dev, @@ -1539,6 +1553,8 @@ struct net_device_ops { int (*ndo_xdp_xmit)(struct net_device *dev, int n, struct xdp_frame **xdp, u32 flags); + struct net_device * (*ndo_xdp_get_xmit_slave)(struct net_device *dev, + struct xdp_buff *xdp); int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); @@ -1805,6 +1821,7 @@ enum netdev_ml_priv_type { * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network * device struct * @mpls_ptr: mpls_dev struct pointer + * @mctp_ptr: MCTP specific data * * @dev_addr: Hw address (before bcast, * because most packets are unicast) @@ -2092,6 +2109,9 @@ struct net_device { #if IS_ENABLED(CONFIG_MPLS_ROUTING) struct mpls_dev __rcu *mpls_ptr; #endif +#if IS_ENABLED(CONFIG_MCTP) + struct mctp_dev __rcu *mctp_ptr; +#endif /* * Cache lines mostly used on receive path (including eth_type_trans()) @@ -2917,7 +2937,6 @@ static inline struct net_device *first_net_device_rcu(struct net *net) } int netdev_boot_setup_check(struct net_device *dev); -unsigned long netdev_boot_base(const char *prefix, int unit); struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type, const char *hwaddr); struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type); @@ -3289,14 +3308,6 @@ static inline bool dev_has_header(const struct net_device *dev) return dev->header_ops && dev->header_ops->create; } -typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, - int len, int size); -int register_gifconf(unsigned int family, gifconf_func_t *gifconf); -static inline int unregister_gifconf(unsigned int family) -{ - return register_gifconf(family, NULL); -} - #ifdef CONFIG_NET_FLOW_LIMIT #define FLOW_LIMIT_HISTORY (1 << 7) /* must be ^2 and !overflow buckets */ struct sd_flow_limit { @@ -3915,6 +3926,8 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, return 0; } #endif +int netif_set_real_num_queues(struct net_device *dev, + unsigned int txq, unsigned int rxq); static inline struct netdev_rx_queue * __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) @@ -3948,7 +3961,7 @@ void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason); /* * It is not allowed to call kfree_skb() or consume_skb() from hardware * interrupt context or with hardware interrupts being disabled. 
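Illustrative sketch for the net_device_ops changes above: the old catch-all ndo_do_ioctl is split into purpose-specific callbacks. A phylib-based driver might route the ethernet MII/timestamping ioctls through the new ndo_eth_ioctl as below; the driver names are made up and phy_mii_ioctl() is the existing phylib helper.

static int demo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev) || !dev->phydev)
		return -EINVAL;
	/* handles SIOCGMIIPHY, SIOCGMIIREG, SIOCSMIIREG, ... */
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

static const struct net_device_ops demo_netdev_ops = {
	.ndo_eth_ioctl	= demo_eth_ioctl,
	/* other callbacks omitted */
};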
- * (in_irq() || irqs_disabled()) + * (in_hardirq() || irqs_disabled()) * * We provide four helpers that can be used in following contexts : * @@ -3984,6 +3997,8 @@ static inline void dev_consume_skb_any(struct sk_buff *skb) __dev_kfree_skb_any(skb, SKB_REASON_CONSUMED); } +u32 bpf_prog_run_generic_xdp(struct sk_buff *skb, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog); void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog); int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff *skb); int netif_rx(struct sk_buff *skb); @@ -4012,10 +4027,16 @@ int netdev_rx_handler_register(struct net_device *dev, void netdev_rx_handler_unregister(struct net_device *dev); bool dev_valid_name(const char *name); +static inline bool is_socket_ioctl_cmd(unsigned int cmd) +{ + return _IOC_TYPE(cmd) == SOCK_IOC_TYPE; +} +int get_user_ifreq(struct ifreq *ifr, void __user **ifrdata, void __user *arg); +int put_user_ifreq(struct ifreq *ifr, void __user *arg); int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, - bool *need_copyout); -int dev_ifconf(struct net *net, struct ifconf *, int); -int dev_ethtool(struct net *net, struct ifreq *); + void __user *data, bool *need_copyout); +int dev_ifconf(struct net *net, struct ifconf __user *ifc); +int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *dev, unsigned int flags, struct netlink_ext_ack *extack); @@ -4069,6 +4090,7 @@ typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, int fd, int expected_fd, u32 flags); int bpf_xdp_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +u8 dev_xdp_prog_count(struct net_device *dev); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); @@ -4136,11 +4158,13 @@ void netdev_run_todo(void); */ static inline void dev_put(struct net_device *dev) { + if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT - this_cpu_dec(*dev->pcpu_refcnt); + this_cpu_dec(*dev->pcpu_refcnt); #else - refcount_dec(&dev->dev_refcnt); + refcount_dec(&dev->dev_refcnt); #endif + } } /** @@ -4151,11 +4175,13 @@ static inline void dev_put(struct net_device *dev) */ static inline void dev_hold(struct net_device *dev) { + if (dev) { #ifdef CONFIG_PCPU_DEV_REFCNT - this_cpu_inc(*dev->pcpu_refcnt); + this_cpu_inc(*dev->pcpu_refcnt); #else - refcount_inc(&dev->dev_refcnt); + refcount_inc(&dev->dev_refcnt); #endif + } } /* Carrier loss detection, dial on demand. 
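Illustrative note on the dev_hold()/dev_put() change above: both helpers now accept a NULL device, which lets callers drop explicit NULL checks on error paths. A hypothetical example:

static void demo_track(struct net_device *maybe_dev)
{
	dev_hold(maybe_dev);	/* no-op when maybe_dev is NULL */
	/* ... */
	dev_put(maybe_dev);	/* likewise NULL-safe */
}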
The functions netif_carrier_on @@ -4615,6 +4641,24 @@ void __hw_addr_unsync_dev(struct netdev_hw_addr_list *list, void __hw_addr_init(struct netdev_hw_addr_list *list); /* Functions used for device addresses handling */ +static inline void +__dev_addr_set(struct net_device *dev, const u8 *addr, size_t len) +{ + memcpy(dev->dev_addr, addr, len); +} + +static inline void dev_addr_set(struct net_device *dev, const u8 *addr) +{ + __dev_addr_set(dev, addr, dev->addr_len); +} + +static inline void +dev_addr_mod(struct net_device *dev, unsigned int offset, + const u8 *addr, size_t len) +{ + memcpy(&dev->dev_addr[offset], addr, len); +} + int dev_addr_add(struct net_device *dev, const unsigned char *addr, unsigned char addr_type); int dev_addr_del(struct net_device *dev, const unsigned char *addr, diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 10279c4830ac..ada1296c87d5 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h @@ -196,6 +196,9 @@ struct ip_set_region { u32 elements; /* Number of elements vs timeout */ }; +/* Max range where every element is added/deleted in one step */ +#define IPSET_MAX_RANGE (1<<20) + /* The max revision number supported by any set type + 1 */ #define IPSET_REVISION_MAX 9 diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 0c7d8d1e945d..700ea077ce2d 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h @@ -18,6 +18,7 @@ struct ip_conntrack_stat { unsigned int expect_create; unsigned int expect_delete; unsigned int search_restart; + unsigned int chaintoolong; }; #define NFCT_INFOMASK 7UL diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 28d7027cd460..5897f3dbaf7c 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -238,9 +238,6 @@ struct xt_table { u_int8_t af; /* address/protocol family */ int priority; /* hook order */ - /* called when table is needed in the given netns */ - int (*table_init)(struct net *net); - /* A unique name... 
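Illustrative sketch for the new address helpers above: drivers can set the MAC through dev_addr_set() instead of writing dev->dev_addr directly. The validation step below is an optional extra, not implied by the patch.

static void demo_assign_mac(struct net_device *dev, const u8 *mac)
{
	if (!is_valid_ether_addr(mac))
		return;
	dev_addr_set(dev, mac);	/* copies dev->addr_len bytes */
}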
*/ const char name[XT_TABLE_MAXNAMELEN]; }; @@ -452,6 +449,9 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu) struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *); +int xt_register_template(const struct xt_table *t, int(*table_init)(struct net *net)); +void xt_unregister_template(const struct xt_table *t); + #ifdef CONFIG_NETFILTER_XTABLES_COMPAT #include <net/compat.h> diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index a8178253ce53..10a01978bc0d 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h @@ -127,4 +127,6 @@ static inline bool ebt_invalid_target(int target) return (target < -NUM_STANDARD_TARGETS || target >= 0); } +int ebt_register_template(const struct ebt_table *t, int(*table_init)(struct net *net)); +void ebt_unregister_template(const struct ebt_table *t); #endif diff --git a/include/linux/netfs.h b/include/linux/netfs.h index 9062adfa2fb9..5d6a4158a9a6 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -102,6 +102,7 @@ struct netfs_cache_resources { const struct netfs_cache_ops *ops; void *cache_priv; void *cache_priv2; + unsigned int debug_id; /* Cookie debug ID */ }; /* @@ -137,7 +138,6 @@ struct netfs_read_request { struct list_head subrequests; /* Requests to fetch I/O from disk or net */ void *netfs_priv; /* Private data for the netfs */ unsigned int debug_id; - unsigned int cookie_debug_id; atomic_t nr_rd_ops; /* Number of read ops in progress */ atomic_t nr_wr_ops; /* Number of write ops in progress */ size_t submitted; /* Amount submitted for I/O so far */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index ce6474594872..b9a8b925db43 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -41,6 +41,11 @@ #include <linux/mempool.h> /* + * These are the default for number of transports to different server IPs + */ +#define NFS_MAX_TRANSPORTS 16 + +/* * These are the default flags for swap requests */ #define NFS_RPC_SWAPFLAGS (RPC_TASK_SWAPPER|RPC_TASK_ROOTCREDS) diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index d71a0e90faeb..2a9acbfe00f0 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -62,6 +62,7 @@ struct nfs_client { u32 cl_minorversion;/* NFSv4 minorversion */ unsigned int cl_nconnect; /* Number of connections */ + unsigned int cl_max_connect; /* max number of xprts allowed */ const char * cl_principal; /* used for machine cred */ #if IS_ENABLED(CONFIG_NFS_V4) diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 2fb373a5c1ed..87069b8459af 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -168,8 +168,6 @@ extern int raw_notifier_call_chain(struct raw_notifier_head *nh, extern int srcu_notifier_call_chain(struct srcu_notifier_head *nh, unsigned long val, void *v); -extern int atomic_notifier_call_chain_robust(struct atomic_notifier_head *nh, - unsigned long val_up, unsigned long val_down, void *v); extern int blocking_notifier_call_chain_robust(struct blocking_notifier_head *nh, unsigned long val_up, unsigned long val_down, void *v); extern int raw_notifier_call_chain_robust(struct raw_notifier_head *nh, diff --git a/include/linux/nubus.h b/include/linux/nubus.h index eba50b057f6f..392fc6c53e96 100644 --- a/include/linux/nubus.h +++ b/include/linux/nubus.h @@ -86,7 +86,7 @@ extern struct list_head nubus_func_rsrcs; struct nubus_driver { struct device_driver driver; int (*probe)(struct 
nubus_board *board); - int (*remove)(struct nubus_board *board); + void (*remove)(struct nubus_board *board); }; extern struct bus_type nubus_bus_type; diff --git a/include/linux/of.h b/include/linux/of.h index 9c2e71e202d1..6f1c41f109bb 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -896,7 +896,7 @@ static inline int of_parse_phandle_with_fixed_args(const struct device_node *np, return -ENOSYS; } -static inline int of_count_phandle_with_args(struct device_node *np, +static inline int of_count_phandle_with_args(const struct device_node *np, const char *list_name, const char *cells_name) { @@ -946,6 +946,11 @@ static inline int of_machine_is_compatible(const char *compat) return 0; } +static inline int of_add_property(struct device_node *np, struct property *prop) +{ + return 0; +} + static inline int of_remove_property(struct device_node *np, struct property *prop) { return 0; diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index acf820e88952..cf6a65b94d40 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -67,9 +67,6 @@ extern void early_init_fdt_scan_reserved_mem(void); extern void early_init_fdt_reserve_self(void); extern void __init early_init_dt_scan_chosen_arch(unsigned long node); extern void early_init_dt_add_memory_arch(u64 base, u64 size); -extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); -extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, - bool no_map); extern u64 dt_mem_next_cell(int s, const __be32 **cellp); /* Early flat tree scan hooks */ diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index f821095218b0..8bf2ea859653 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h @@ -49,7 +49,7 @@ static inline struct of_mm_gpio_chip *to_of_mm_gpio_chip(struct gpio_chip *gc) return container_of(gc, struct of_mm_gpio_chip, gc); } -extern int of_get_named_gpio_flags(struct device_node *np, +extern int of_get_named_gpio_flags(const struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags); extern int of_mm_gpiochip_add_data(struct device_node *np, @@ -67,7 +67,7 @@ extern void of_mm_gpiochip_remove(struct of_mm_gpio_chip *mm_gc); #include <linux/errno.h> /* Drivers may not strictly depend on the GPIO support, so let them link. */ -static inline int of_get_named_gpio_flags(struct device_node *np, +static inline int of_get_named_gpio_flags(const struct device_node *np, const char *list_name, int index, enum of_gpio_flags *flags) { if (flags) @@ -98,7 +98,8 @@ static inline int of_get_named_gpio_flags(struct device_node *np, * The above example defines four GPIOs, two of which are not specified. 
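Illustrative sketch for the nubus.h hunk above: the driver remove() callback now returns void, reflecting that removal cannot fail. Driver names are hypothetical and the probe() callback is assumed to be defined elsewhere.

static void demo_nubus_remove(struct nubus_board *board)
{
	/* tear down and free board resources; nothing to return */
}

static struct nubus_driver demo_nubus_driver = {
	.driver	= { .name = "demo-nubus" },
	.probe	= demo_nubus_probe,	/* assumed defined elsewhere */
	.remove	= demo_nubus_remove,
};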
* This function will return '4' */ -static inline int of_gpio_named_count(struct device_node *np, const char* propname) +static inline int of_gpio_named_count(const struct device_node *np, + const char *propname) { return of_count_phandle_with_args(np, propname, "#gpio-cells"); } @@ -109,12 +110,12 @@ static inline int of_gpio_named_count(struct device_node *np, const char* propna * * Same as of_gpio_named_count, but hard coded to use the 'gpios' property */ -static inline int of_gpio_count(struct device_node *np) +static inline int of_gpio_count(const struct device_node *np) { return of_gpio_named_count(np, "gpios"); } -static inline int of_get_gpio_flags(struct device_node *np, int index, +static inline int of_get_gpio_flags(const struct device_node *np, int index, enum of_gpio_flags *flags) { return of_get_named_gpio_flags(np, "gpios", index, flags); @@ -129,7 +130,7 @@ static inline int of_get_gpio_flags(struct device_node *np, int index, * Returns GPIO number to use with Linux generic GPIO API, or one of the errno * value on the error condition. */ -static inline int of_get_named_gpio(struct device_node *np, +static inline int of_get_named_gpio(const struct device_node *np, const char *propname, int index) { return of_get_named_gpio_flags(np, propname, index, NULL); @@ -143,7 +144,7 @@ static inline int of_get_named_gpio(struct device_node *np, * Returns GPIO number to use with Linux generic GPIO API, or one of the errno * value on the error condition. */ -static inline int of_get_gpio(struct device_node *np, int index) +static inline int of_get_gpio(const struct device_node *np, int index) { return of_get_gpio_flags(np, index, NULL); } diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 3d8db1f6a5db..0f4a8903922a 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -70,6 +70,9 @@ enum OID { OID_spnego, /* 1.3.6.1.5.5.2 */ + OID_IAKerb, /* 1.3.6.1.5.2.5 */ + OID_PKU2U, /* 1.3.5.1.5.2.7 */ + OID_Scram, /* 1.3.6.1.5.5.14 */ OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ OID_id_ansip384r1, /* 1.3.132.0.34 */ @@ -104,6 +107,10 @@ enum OID { OID_authorityKeyIdentifier, /* 2.5.29.35 */ OID_extKeyUsage, /* 2.5.29.37 */ + /* Heimdal mechanisms */ + OID_NetlogonMechanism, /* 1.2.752.43.14.2 */ + OID_appleLocalKdcSupported, /* 1.2.752.43.14.3 */ + /* EC-RDSA */ OID_gostCPSignA, /* 1.2.643.2.2.35.1 */ OID_gostCPSignB, /* 1.2.643.2.2.35.2 */ diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index b7bf735960c2..082841908fe7 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h @@ -81,9 +81,6 @@ extern int gpmc_configure(int cmd, int wval); extern void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p); -extern void omap3_gpmc_save_context(void); -extern void omap3_gpmc_restore_context(void); - struct gpmc_timings; struct omap_nand_platform_data; struct omap_onenand_platform_data; diff --git a/include/linux/once.h b/include/linux/once.h index 9225ee6d96c7..d361fb14ac3a 100644 --- a/include/linux/once.h +++ b/include/linux/once.h @@ -7,7 +7,7 @@ bool __do_once_start(bool *done, unsigned long *flags); void __do_once_done(bool *done, struct static_key_true *once_key, - unsigned long *flags); + unsigned long *flags, struct module *mod); /* Call a function exactly once. 
The idea of DO_ONCE() is to perform * a function call such as initialization of random seeds, etc, only @@ -16,7 +16,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key, * out the condition into a nop. DO_ONCE() guarantees type safety of * arguments! * - * Not that the following is not equivalent ... + * Note that the following is not equivalent ... * * DO_ONCE(func, arg); * DO_ONCE(func, arg); @@ -46,7 +46,7 @@ void __do_once_done(bool *done, struct static_key_true *once_key, if (unlikely(___ret)) { \ func(__VA_ARGS__); \ __do_once_done(&___done, &___once_key, \ - &___flags); \ + &___flags, THIS_MODULE); \ } \ } \ ___ret; \ diff --git a/include/linux/padata.h b/include/linux/padata.h index a433f13fc4bf..495b16b6b4d7 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -12,6 +12,7 @@ #ifndef PADATA_H #define PADATA_H +#include <linux/refcount.h> #include <linux/compiler_types.h> #include <linux/workqueue.h> #include <linux/spinlock.h> @@ -96,7 +97,7 @@ struct parallel_data { struct padata_shell *ps; struct padata_list __percpu *reorder_list; struct padata_serial_queue __percpu *squeue; - atomic_t refcnt; + refcount_t refcnt; unsigned int seq_nr; unsigned int processed; int cpu; diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5922031ffab6..a558d67ee86f 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -131,7 +131,7 @@ enum pageflags { #ifdef CONFIG_MEMORY_FAILURE PG_hwpoison, /* hardware poisoned page. Don't touch */ #endif -#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) +#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) PG_young, PG_idle, #endif @@ -178,6 +178,8 @@ enum pageflags { PG_reported = PG_uptodate, }; +#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) + #ifndef __GENERATING_BOUNDS_H static inline unsigned long _compound_head(const struct page *page) @@ -439,7 +441,7 @@ PAGEFLAG_FALSE(HWPoison) #define __PG_HWPOISON 0 #endif -#if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT) +#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT) TESTPAGEFLAG(Young, young, PF_ANY) SETPAGEFLAG(Young, young, PF_ANY) TESTCLEARFLAG(Young, young, PF_ANY) @@ -633,43 +635,6 @@ static inline int PageTransCompound(struct page *page) } /* - * PageTransCompoundMap is the same as PageTransCompound, but it also - * guarantees the primary MMU has the entire compound page mapped - * through pmd_trans_huge, which in turn guarantees the secondary MMUs - * can also map the entire compound page. This allows the secondary - * MMUs to call get_user_pages() only once for each compound page and - * to immediately map the entire compound page with a single secondary - * MMU fault. If there will be a pmd split later, the secondary MMUs - * will get an update through the MMU notifier invalidation through - * split_huge_pmd(). - * - * Unlike PageTransCompound, this is safe to be called only while - * split_huge_pmd() cannot run from under us, like if protected by the - * MMU notifier, otherwise it may result in page->_mapcount check false - * positives. - * - * We have to treat page cache THP differently since every subpage of it - * would get _mapcount inc'ed once it is PMD mapped. But, it may be PTE - * mapped in the current process so comparing subpage's _mapcount to - * compound_mapcount to filter out PTE mapped case. 
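Illustrative note on the once.h change above: __do_once_done() now also receives THIS_MODULE, supplied implicitly by the DO_ONCE() macro, so callers are unchanged. A minimal hypothetical user:

static u32 demo_hash_seed;

static void demo_init_seed_once(void)
{
	/* get_random_bytes() runs only on the very first call */
	DO_ONCE(get_random_bytes, &demo_hash_seed, sizeof(demo_hash_seed));
}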
- */ -static inline int PageTransCompoundMap(struct page *page) -{ - struct page *head; - - if (!PageTransCompound(page)) - return 0; - - if (PageAnon(page)) - return atomic_read(&page->_mapcount) < 0; - - head = compound_head(page); - /* File THP is PMD mapped and not PTE mapped */ - return atomic_read(&page->_mapcount) == - atomic_read(compound_mapcount_ptr(head)); -} - -/* * PageTransTail returns true for both transparent huge pages * and hugetlbfs pages, so it should only be called when it's known * that hugetlbfs pages aren't involved. @@ -815,6 +780,15 @@ static inline int PageSlabPfmemalloc(struct page *page) return PageActive(page); } +/* + * A version of PageSlabPfmemalloc() for opportunistic checks where the page + * might have been freed under us and not be a PageSlab anymore. + */ +static inline int __PageSlabPfmemalloc(struct page *page) +{ + return PageActive(page); +} + static inline void SetPageSlabPfmemalloc(struct page *page) { VM_BUG_ON_PAGE(!PageSlab(page), page); @@ -859,7 +833,7 @@ static inline void ClearPageSlabPfmemalloc(struct page *page) * alloc-free cycle to prevent from reusing the page. */ #define PAGE_FLAGS_CHECK_AT_PREP \ - (((1UL << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON) + (PAGEFLAGS_MASK & ~__PG_HWPOISON) #define PAGE_FLAGS_PRIVATE \ (1UL << PG_private | 1UL << PG_private_2) diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index aff81ba31bd8..fabb2e1e087f 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -19,7 +19,7 @@ struct page_ext_operations { enum page_ext_flags { PAGE_EXT_OWNER, PAGE_EXT_OWNER_ALLOCATED, -#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT) +#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) PAGE_EXT_YOUNG, PAGE_EXT_IDLE, #endif diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h index 1e894d34bdce..d8a6aecf99cb 100644 --- a/include/linux/page_idle.h +++ b/include/linux/page_idle.h @@ -6,7 +6,7 @@ #include <linux/page-flags.h> #include <linux/page_ext.h> -#ifdef CONFIG_IDLE_PAGE_TRACKING +#ifdef CONFIG_PAGE_IDLE_FLAG #ifdef CONFIG_64BIT static inline bool page_is_young(struct page *page) @@ -106,7 +106,7 @@ static inline void clear_page_idle(struct page *page) } #endif /* CONFIG_64BIT */ -#else /* !CONFIG_IDLE_PAGE_TRACKING */ +#else /* !CONFIG_PAGE_IDLE_FLAG */ static inline bool page_is_young(struct page *page) { @@ -135,6 +135,6 @@ static inline void clear_page_idle(struct page *page) { } -#endif /* CONFIG_IDLE_PAGE_TRACKING */ +#endif /* CONFIG_PAGE_IDLE_FLAG */ #endif /* _LINUX_MM_PAGE_IDLE_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ed02aa522263..62db6b0176b9 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -521,18 +521,17 @@ static inline struct page *read_mapping_page(struct address_space *mapping, */ static inline pgoff_t page_to_index(struct page *page) { - pgoff_t pgoff; + struct page *head; if (likely(!PageTransTail(page))) return page->index; + head = compound_head(page); /* * We don't initialize ->index for tail pages: calculate based on * head page */ - pgoff = compound_head(page)->index; - pgoff += page - compound_head(page); - return pgoff; + return head->index + page - head; } extern pgoff_t hugetlb_basepage_index(struct page *page); @@ -736,7 +735,7 @@ extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); /* * Fault everything in given userspace address range in. 
*/ -static inline int fault_in_pages_writeable(char __user *uaddr, int size) +static inline int fault_in_pages_writeable(char __user *uaddr, size_t size) { char __user *end = uaddr + size - 1; @@ -763,7 +762,7 @@ static inline int fault_in_pages_writeable(char __user *uaddr, int size) return 0; } -static inline int fault_in_pages_readable(const char __user *uaddr, int size) +static inline int fault_in_pages_readable(const char __user *uaddr, size_t size) { volatile char c; const char __user *end = uaddr + size - 1; diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 5ba475ca9078..f16de399d2de 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -122,6 +122,9 @@ static inline void pci_acpi_add_edr_notifier(struct pci_dev *pdev) { } static inline void pci_acpi_remove_edr_notifier(struct pci_dev *pdev) { } #endif /* CONFIG_PCIE_EDR */ +int pci_acpi_set_companion_lookup_hook(struct acpi_device *(*func)(struct pci_dev *)); +void pci_acpi_clear_companion_lookup_hook(void); + #else /* CONFIG_ACPI */ static inline void acpi_pci_add_bus(struct pci_bus *bus) { } static inline void acpi_pci_remove_bus(struct pci_bus *bus) { } diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 50a649d33e68..a48778e1a4ee 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -62,31 +62,32 @@ pci_epc_interface_string(enum pci_epc_interface_type type) * @owner: the module owner containing the ops */ struct pci_epc_ops { - int (*write_header)(struct pci_epc *epc, u8 func_no, + int (*write_header)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_header *hdr); - int (*set_bar)(struct pci_epc *epc, u8 func_no, + int (*set_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); - void (*clear_bar)(struct pci_epc *epc, u8 func_no, + void (*clear_bar)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); - int (*map_addr)(struct pci_epc *epc, u8 func_no, + int (*map_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr, u64 pci_addr, size_t size); - void (*unmap_addr)(struct pci_epc *epc, u8 func_no, + void (*unmap_addr)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t addr); - int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts); - int (*get_msi)(struct pci_epc *epc, u8 func_no); - int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts, - enum pci_barno, u32 offset); - int (*get_msix)(struct pci_epc *epc, u8 func_no); - int (*raise_irq)(struct pci_epc *epc, u8 func_no, + int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u8 interrupts); + int (*get_msi)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); + int (*set_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u16 interrupts, enum pci_barno, u32 offset); + int (*get_msix)(struct pci_epc *epc, u8 func_no, u8 vfunc_no); + int (*raise_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, enum pci_epc_irq_type type, u16 interrupt_num); - int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, + int (*map_msi_irq)(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size, u32 *msi_data, u32 *msi_addr_offset); int (*start)(struct pci_epc *epc); void (*stop)(struct pci_epc *epc); const struct pci_epc_features* (*get_features)(struct pci_epc *epc, - u8 func_no); + u8 func_no, u8 vfunc_no); struct module *owner; }; @@ -128,6 +129,8 @@ struct pci_epc_mem { * single window. 
* @num_windows: number of windows supported by device * @max_functions: max number of functions that can be configured in this EPC + * @max_vfs: Array indicating the maximum number of virtual functions that can + * be associated with each physical function * @group: configfs group representing the PCI EPC device * @lock: mutex to protect pci_epc ops * @function_num_map: bitmap to manage physical function number @@ -141,6 +144,7 @@ struct pci_epc { struct pci_epc_mem *mem; unsigned int num_windows; u8 max_functions; + u8 *max_vfs; struct config_group *group; /* mutex to protect against concurrent access of EP controller */ struct mutex lock; @@ -208,31 +212,32 @@ void pci_epc_linkup(struct pci_epc *epc); void pci_epc_init_notify(struct pci_epc *epc); void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf, enum pci_epc_interface_type type); -int pci_epc_write_header(struct pci_epc *epc, u8 func_no, +int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_header *hdr); -int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, +int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); -void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, +void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no, struct pci_epf_bar *epf_bar); -int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, +int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u64 pci_addr, size_t size); -void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, +void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr); -int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts); -int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); -int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts, - enum pci_barno, u32 offset); -int pci_epc_get_msix(struct pci_epc *epc, u8 func_no); -int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, +int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u8 interrupts); +int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no); +int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no, + u16 interrupts, enum pci_barno, u32 offset); +int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no); +int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size, u32 *msi_data, u32 *msi_addr_offset); -int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, +int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no, enum pci_epc_irq_type type, u16 interrupt_num); int pci_epc_start(struct pci_epc *epc); void pci_epc_stop(struct pci_epc *epc); const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, - u8 func_no); + u8 func_no, u8 vfunc_no); enum pci_barno pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features); enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 8292420426f3..009a07147c61 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -121,8 +121,10 @@ struct pci_epf_bar { * @bar: represents the BAR of EPF device * @msi_interrupts: number of MSI interrupts required by this function * @msix_interrupts: number of MSI-X interrupts required by this function - * @func_no: unique function number within this endpoint device + * @func_no: unique (physical) function number within this endpoint 
device + * @vfunc_no: unique virtual function number within a physical function * @epc: the EPC device to which this EPF device is bound + * @epf_pf: the physical EPF device to which this virtual EPF device is bound * @driver: the EPF driver to which this EPF device is bound * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc * @nb: notifier block to notify EPF of any EPC events (like linkup) @@ -133,6 +135,10 @@ struct pci_epf_bar { * @sec_epc_bar: represents the BAR of EPF device associated with secondary EPC * @sec_epc_func_no: unique (physical) function number within the secondary EPC * @group: configfs group associated with the EPF device + * @is_bound: indicates if bind notification to function driver has been invoked + * @is_vf: true - virtual function, false - physical function + * @vfunction_num_map: bitmap to manage virtual function number + * @pci_vepf: list of virtual endpoint functions associated with this function */ struct pci_epf { struct device dev; @@ -142,8 +148,10 @@ struct pci_epf { u8 msi_interrupts; u16 msix_interrupts; u8 func_no; + u8 vfunc_no; struct pci_epc *epc; + struct pci_epf *epf_pf; struct pci_epf_driver *driver; struct list_head list; struct notifier_block nb; @@ -156,6 +164,10 @@ struct pci_epf { struct pci_epf_bar sec_epc_bar[6]; u8 sec_epc_func_no; struct config_group *group; + unsigned int is_bound; + unsigned int is_vf; + unsigned long vfunction_num_map; + struct list_head pci_vepf; }; /** @@ -199,4 +211,6 @@ int pci_epf_bind(struct pci_epf *epf); void pci_epf_unbind(struct pci_epf *epf); struct config_group *pci_epf_type_add_cfs(struct pci_epf *epf, struct config_group *group); +int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf); +void pci_epf_remove_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf); #endif /* __LINUX_PCI_EPF_H */ diff --git a/include/linux/pci.h b/include/linux/pci.h index 540b377ca8f6..cd8aa6fce204 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -49,6 +49,12 @@ PCI_STATUS_SIG_TARGET_ABORT | \ PCI_STATUS_PARITY) +/* Number of reset methods used in pci_reset_fn_methods array in pci.c */ +#define PCI_NUM_RESET_METHODS 7 + +#define PCI_RESET_PROBE true +#define PCI_RESET_DO_RESET false + /* * The PCI interface treats multi-function devices as independent * devices. 
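To show how the new vfunc_no plumbing above is meant to be used from the function-driver side, here is a small sketch (not part of this patch; the foo_* functions and error handling are illustrative): an EPF driver forwards its own epf->func_no and epf->vfunc_no to the EPC helpers, and a virtual function is attached to its parent physical function with pci_epf_add_vepf().

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>

static int foo_epf_write_header(struct pci_epf *epf, struct pci_epf_header *hdr)
{
	struct pci_epc *epc = epf->epc;

	/* vfunc_no == 0 addresses the physical function itself */
	return pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, hdr);
}

static int foo_bind_vf(struct pci_epf *epf_pf, struct pci_epf *epf_vf)
{
	int ret;

	ret = pci_epf_add_vepf(epf_pf, epf_vf);	/* assigns epf_vf->vfunc_no */
	if (ret)
		return ret;
	/* ... on teardown: pci_epf_remove_vepf(epf_pf, epf_vf); */
	return 0;
}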
The slot/function address of each device is encoded @@ -288,21 +294,14 @@ enum pci_bus_speed { enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev); enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev); -struct pci_cap_saved_data { - u16 cap_nr; - bool cap_extended; - unsigned int size; - u32 data[]; -}; - -struct pci_cap_saved_state { - struct hlist_node next; - struct pci_cap_saved_data cap; +struct pci_vpd { + struct mutex lock; + unsigned int len; + u8 cap; }; struct irq_affinity; struct pcie_link_state; -struct pci_vpd; struct pci_sriov; struct pci_p2pdma; struct rcec_ea; @@ -333,6 +332,7 @@ struct pci_dev { struct rcec_ea *rcec_ea; /* RCEC cached endpoint association */ struct pci_dev *rcec; /* Associated RCEC device */ #endif + u32 devcap; /* PCIe Device Capabilities */ u8 pcie_cap; /* PCIe capability offset */ u8 msi_cap; /* MSI capability offset */ u8 msix_cap; /* MSI-X capability offset */ @@ -388,6 +388,7 @@ struct pci_dev { supported from root to here */ u16 l1ss; /* L1SS Capability pointer */ #endif + unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */ unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */ pci_channel_state_t error_state; /* Current connectivity state */ @@ -427,7 +428,6 @@ struct pci_dev { unsigned int state_saved:1; unsigned int is_physfn:1; unsigned int is_virtfn:1; - unsigned int reset_fn:1; unsigned int is_hotplug_bridge:1; unsigned int shpc_managed:1; /* SHPC owned by shpchp */ unsigned int is_thunderbolt:1; /* Thunderbolt controller */ @@ -473,7 +473,7 @@ struct pci_dev { #ifdef CONFIG_PCI_MSI const struct attribute_group **msi_irq_groups; #endif - struct pci_vpd *vpd; + struct pci_vpd vpd; #ifdef CONFIG_PCIE_DPC u16 dpc_cap; unsigned int dpc_rp_extensions:1; @@ -505,6 +505,9 @@ struct pci_dev { char *driver_override; /* Driver name to force a match */ unsigned long priv_flags; /* Private flags for the PCI driver */ + + /* These methods index pci_reset_fn_methods[] */ + u8 reset_methods[PCI_NUM_RESET_METHODS]; /* In priority order */ }; static inline struct pci_dev *pci_physfn(struct pci_dev *dev) @@ -526,6 +529,16 @@ static inline int pci_channel_offline(struct pci_dev *pdev) return (pdev->error_state != pci_channel_io_normal); } +/* + * Currently in ACPI spec, for each PCI host bridge, PCI Segment + * Group number is limited to a 16-bit value, therefore (int)-1 is + * not a valid PCI domain number, and can be used as a sentinel + * value indicating ->domain_nr is not set by the driver (and + * CONFIG_PCI_DOMAINS_GENERIC=y archs will set it with + * pci_bus_find_domain_nr()). + */ +#define PCI_DOMAIN_NR_NOT_SET (-1) + struct pci_host_bridge { struct device dev; struct pci_bus *bus; /* Root bus */ @@ -533,6 +546,7 @@ struct pci_host_bridge { struct pci_ops *child_ops; void *sysdata; int busnr; + int domain_nr; struct list_head windows; /* resource_entry */ struct list_head dma_ranges; /* dma ranges resource list */ u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ @@ -902,6 +916,35 @@ struct pci_driver { .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID /** + * PCI_DEVICE_DRIVER_OVERRIDE - macro used to describe a PCI device with + * override_only flags. + * @vend: the 16 bit PCI Vendor ID + * @dev: the 16 bit PCI Device ID + * @driver_override: the 32 bit PCI Device override_only + * + * This macro is used to create a struct pci_device_id that matches only a + * driver_override device. The subvendor and subdevice fields will be set to + * PCI_ANY_ID. 
+ */ +#define PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \ + .vendor = (vend), .device = (dev), .subvendor = PCI_ANY_ID, \ + .subdevice = PCI_ANY_ID, .override_only = (driver_override) + +/** + * PCI_DRIVER_OVERRIDE_DEVICE_VFIO - macro used to describe a VFIO + * "driver_override" PCI device. + * @vend: the 16 bit PCI Vendor ID + * @dev: the 16 bit PCI Device ID + * + * This macro is used to create a struct pci_device_id that matches a + * specific device. The subvendor and subdevice fields will be set to + * PCI_ANY_ID and the driver_override will be set to + * PCI_ID_F_VFIO_DRIVER_OVERRIDE. + */ +#define PCI_DRIVER_OVERRIDE_DEVICE_VFIO(vend, dev) \ + PCI_DEVICE_DRIVER_OVERRIDE(vend, dev, PCI_ID_F_VFIO_DRIVER_OVERRIDE) + +/** * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem * @vend: the 16 bit PCI Vendor ID * @dev: the 16 bit PCI Device ID @@ -1228,7 +1271,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev, enum pci_bus_speed *speed, enum pcie_link_width *width); void pcie_print_link_status(struct pci_dev *dev); -bool pcie_has_flr(struct pci_dev *dev); +int pcie_reset_flr(struct pci_dev *dev, bool probe); int pcie_flr(struct pci_dev *dev); int __pci_reset_function_locked(struct pci_dev *dev); int pci_reset_function(struct pci_dev *dev); @@ -1278,12 +1321,6 @@ int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state); int pci_load_and_free_saved_state(struct pci_dev *dev, struct pci_saved_state **state); -struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap); -struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, - u16 cap); -int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size); -int pci_add_ext_cap_save_buffer(struct pci_dev *dev, - u16 cap, unsigned int size); int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state); int pci_set_power_state(struct pci_dev *dev, pci_power_t state); pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state); @@ -1620,6 +1657,16 @@ static inline bool pci_aer_available(void) { return false; } bool pci_ats_disabled(void); +#ifdef CONFIG_PCIE_PTM +int pci_enable_ptm(struct pci_dev *dev, u8 *granularity); +bool pcie_ptm_enabled(struct pci_dev *dev); +#else +static inline int pci_enable_ptm(struct pci_dev *dev, u8 *granularity) +{ return -EINVAL; } +static inline bool pcie_ptm_enabled(struct pci_dev *dev) +{ return false; } +#endif + void pci_cfg_access_lock(struct pci_dev *dev); bool pci_cfg_access_trylock(struct pci_dev *dev); void pci_cfg_access_unlock(struct pci_dev *dev); @@ -1740,8 +1787,9 @@ static inline void pci_disable_device(struct pci_dev *dev) { } static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; } static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY; } -static inline int __pci_register_driver(struct pci_driver *drv, - struct module *owner) +static inline int __must_check __pci_register_driver(struct pci_driver *drv, + struct module *owner, + const char *mod_name) { return 0; } static inline int pci_register_driver(struct pci_driver *drv) { return 0; } @@ -1881,9 +1929,7 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) #define pci_resource_len(dev,bar) \ - ((pci_resource_start((dev), (bar)) == 0 && \ - pci_resource_end((dev), (bar)) == \ - 
pci_resource_start((dev), (bar))) ? 0 : \ + ((pci_resource_end((dev), (bar)) == 0) ? 0 : \ \ (pci_resource_end((dev), (bar)) - \ pci_resource_start((dev), (bar)) + 1)) @@ -2250,20 +2296,6 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); #define PCI_VPD_LRDT_RO_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RO_DATA) #define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA) -/* Small Resource Data Type Tag Item Names */ -#define PCI_VPD_STIN_END 0x0f /* End */ - -#define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3) - -#define PCI_VPD_SRDT_TIN_MASK 0x78 -#define PCI_VPD_SRDT_LEN_MASK 0x07 -#define PCI_VPD_LRDT_TIN_MASK 0x7f - -#define PCI_VPD_LRDT_TAG_SIZE 3 -#define PCI_VPD_SRDT_TAG_SIZE 1 - -#define PCI_VPD_INFO_FLD_HDR_SIZE 3 - #define PCI_VPD_RO_KEYWORD_PARTNO "PN" #define PCI_VPD_RO_KEYWORD_SERIALNO "SN" #define PCI_VPD_RO_KEYWORD_MFR_ID "MN" @@ -2271,83 +2303,45 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); #define PCI_VPD_RO_KEYWORD_CHKSUM "RV" /** - * pci_vpd_lrdt_size - Extracts the Large Resource Data Type length - * @lrdt: Pointer to the beginning of the Large Resource Data Type tag - * - * Returns the extracted Large Resource Data Type length. - */ -static inline u16 pci_vpd_lrdt_size(const u8 *lrdt) -{ - return (u16)lrdt[1] + ((u16)lrdt[2] << 8); -} - -/** - * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item - * @lrdt: Pointer to the beginning of the Large Resource Data Type tag - * - * Returns the extracted Large Resource Data Type Tag item. - */ -static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) -{ - return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); -} - -/** - * pci_vpd_srdt_size - Extracts the Small Resource Data Type length - * @srdt: Pointer to the beginning of the Small Resource Data Type tag - * - * Returns the extracted Small Resource Data Type length. - */ -static inline u8 pci_vpd_srdt_size(const u8 *srdt) -{ - return (*srdt) & PCI_VPD_SRDT_LEN_MASK; -} - -/** - * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item - * @srdt: Pointer to the beginning of the Small Resource Data Type tag + * pci_vpd_alloc - Allocate buffer and read VPD into it + * @dev: PCI device + * @size: pointer to field where VPD length is returned * - * Returns the extracted Small Resource Data Type Tag Item. + * Returns pointer to allocated buffer or an ERR_PTR in case of failure */ -static inline u8 pci_vpd_srdt_tag(const u8 *srdt) -{ - return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3; -} +void *pci_vpd_alloc(struct pci_dev *dev, unsigned int *size); /** - * pci_vpd_info_field_size - Extracts the information field length - * @info_field: Pointer to the beginning of an information field header + * pci_vpd_find_id_string - Locate id string in VPD + * @buf: Pointer to buffered VPD data + * @len: The length of the buffer area in which to search + * @size: Pointer to field where length of id string is returned * - * Returns the extracted information field length. + * Returns the index of the id string or -ENOENT if not found. 
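As an illustration of the replacement VPD API (a sketch, not taken from this diff; foo_read_serial() and its error handling are hypothetical), a driver now reads the whole VPD image once with pci_vpd_alloc() and then searches it with pci_vpd_find_ro_info_keyword(), declared just below:

#include <linux/pci.h>
#include <linux/err.h>
#include <linux/slab.h>

static int foo_read_serial(struct pci_dev *pdev)
{
	unsigned int vpd_len, kw_len;
	void *vpd;
	int off;

	vpd = pci_vpd_alloc(pdev, &vpd_len);
	if (IS_ERR(vpd))
		return PTR_ERR(vpd);

	off = pci_vpd_find_ro_info_keyword(vpd, vpd_len,
					   PCI_VPD_RO_KEYWORD_SERIALNO, &kw_len);
	if (off >= 0)
		pci_info(pdev, "VPD serial: %.*s\n", (int)kw_len,
			 (char *)vpd + off);

	kfree(vpd);
	return off < 0 ? off : 0;
}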
*/ -static inline u8 pci_vpd_info_field_size(const u8 *info_field) -{ - return info_field[2]; -} +int pci_vpd_find_id_string(const u8 *buf, unsigned int len, unsigned int *size); /** - * pci_vpd_find_tag - Locates the Resource Data Type tag provided - * @buf: Pointer to buffered vpd data - * @len: The length of the vpd buffer - * @rdt: The Resource Data Type to search for + * pci_vpd_find_ro_info_keyword - Locate info field keyword in VPD RO section + * @buf: Pointer to buffered VPD data + * @len: The length of the buffer area in which to search + * @kw: The keyword to search for + * @size: Pointer to field where length of found keyword data is returned * - * Returns the index where the Resource Data Type was found or - * -ENOENT otherwise. + * Returns the index of the information field keyword data or -ENOENT if + * not found. */ -int pci_vpd_find_tag(const u8 *buf, unsigned int len, u8 rdt); +int pci_vpd_find_ro_info_keyword(const void *buf, unsigned int len, + const char *kw, unsigned int *size); /** - * pci_vpd_find_info_keyword - Locates an information field keyword in the VPD - * @buf: Pointer to buffered vpd data - * @off: The offset into the buffer at which to begin the search - * @len: The length of the buffer area, relative to off, in which to search - * @kw: The keyword to search for + * pci_vpd_check_csum - Check VPD checksum + * @buf: Pointer to buffered VPD data + * @len: VPD size * - * Returns the index where the information field keyword was found or - * -ENOENT otherwise. + * Returns 1 if VPD has no checksum, otherwise 0 or an errno */ -int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off, - unsigned int len, const char *kw); +int pci_vpd_check_csum(const void *buf, unsigned int len); /* PCI <-> OF binding helpers */ #ifdef CONFIG_OF diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 2dac431d94ac..3a10d6ec3ee7 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -44,7 +44,7 @@ struct hotplug_slot_ops { int (*get_attention_status) (struct hotplug_slot *slot, u8 *value); int (*get_latch_status) (struct hotplug_slot *slot, u8 *value); int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value); - int (*reset_slot) (struct hotplug_slot *slot, int probe); + int (*reset_slot) (struct hotplug_slot *slot, bool probe); }; /** diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 4bac1831de80..011f2f1ea5bb 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -555,6 +555,7 @@ #define PCI_DEVICE_ID_AMD_17H_M60H_DF_F3 0x144b #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443 #define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653 +#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F3 0x167c #define PCI_DEVICE_ID_AMD_19H_M50H_DF_F3 0x166d #define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703 #define PCI_DEVICE_ID_AMD_LANCE 0x2000 @@ -1121,6 +1122,7 @@ #define PCI_DEVICE_ID_3COM_3CR990SVR 0x990a #define PCI_VENDOR_ID_AL 0x10b9 +#define PCI_DEVICE_ID_AL_M1489 0x1489 #define PCI_DEVICE_ID_AL_M1533 0x1533 #define PCI_DEVICE_ID_AL_M1535 0x1535 #define PCI_DEVICE_ID_AL_M1541 0x1541 @@ -2451,7 +2453,8 @@ #define PCI_VENDOR_ID_TDI 0x192E #define PCI_DEVICE_ID_TDI_EHCI 0x0101 -#define PCI_VENDOR_ID_FREESCALE 0x1957 +#define PCI_VENDOR_ID_FREESCALE 0x1957 /* duplicate: NXP */ +#define PCI_VENDOR_ID_NXP 0x1957 /* duplicate: FREESCALE */ #define PCI_DEVICE_ID_MPC8308 0xc006 #define PCI_DEVICE_ID_MPC8315E 0x00b4 #define PCI_DEVICE_ID_MPC8315 0x00b5 @@ -2643,6 +2646,7 @@ #define PCI_DEVICE_ID_INTEL_82375 0x0482 #define PCI_DEVICE_ID_INTEL_82424 
0x0483 #define PCI_DEVICE_ID_INTEL_82378 0x0484 +#define PCI_DEVICE_ID_INTEL_82425 0x0486 #define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807 #define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808 #define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2d510ad750ed..fe156a8170aa 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -762,6 +762,7 @@ struct perf_event { #ifdef CONFIG_BPF_SYSCALL perf_overflow_handler_t orig_overflow_handler; struct bpf_prog *prog; + u64 bpf_cookie; #endif #ifdef CONFIG_EVENT_TRACING diff --git a/include/linux/phy.h b/include/linux/phy.h index 3b80dc3ed68b..736e1d1a47c4 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -1431,6 +1431,7 @@ static inline int phy_device_register(struct phy_device *phy) static inline void phy_device_free(struct phy_device *phydev) { } #endif /* CONFIG_PHYLIB */ void phy_device_remove(struct phy_device *phydev); +int phy_get_c45_ids(struct phy_device *phydev); int phy_init_hw(struct phy_device *phydev); int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); diff --git a/include/linux/phylink.h b/include/linux/phylink.h index afb3ded0b691..237291196ce2 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -451,6 +451,9 @@ void phylink_mac_change(struct phylink *, bool up); void phylink_start(struct phylink *); void phylink_stop(struct phylink *); +void phylink_suspend(struct phylink *pl, bool mac_wol); +void phylink_resume(struct phylink *pl); + void phylink_ethtool_get_wol(struct phylink *, struct ethtool_wolinfo *); int phylink_ethtool_set_wol(struct phylink *, struct ethtool_wolinfo *); diff --git a/include/linux/pid.h b/include/linux/pid.h index fa10acb8d6a4..af308e15f174 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -78,6 +78,7 @@ struct file; extern struct pid *pidfd_pid(const struct file *file); struct pid *pidfd_get_pid(unsigned int fd, unsigned int *flags); +int pidfd_create(struct pid *pid, unsigned int flags); static inline struct pid *get_pid(struct pid *pid) { diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 5d2705f1d01c..fc5642431b92 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -48,6 +48,7 @@ struct pipe_buffer { * @files: number of struct file referring this pipe (protected by ->i_lock) * @r_counter: reader counter * @w_counter: writer counter + * @poll_usage: is this pipe used for epoll, which has crazy wakeups? 
* @fasync_readers: reader side fasync * @fasync_writers: writer side fasync * @bufs: the circular array of pipe buffers @@ -70,6 +71,7 @@ struct pipe_inode_info { unsigned int files; unsigned int r_counter; unsigned int w_counter; + unsigned int poll_usage; struct page *tmp_page; struct fasync_struct *fasync_readers; struct fasync_struct *fasync_writers; diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h index 45f53afc46e2..271bd87bff0a 100644 --- a/include/linux/platform_data/cros_ec_commands.h +++ b/include/linux/platform_data/cros_ec_commands.h @@ -4228,6 +4228,7 @@ enum ec_device_event { EC_DEVICE_EVENT_TRACKPAD, EC_DEVICE_EVENT_DSP, EC_DEVICE_EVENT_WIFI, + EC_DEVICE_EVENT_WLC, }; enum ec_device_event_param { @@ -5460,6 +5461,72 @@ struct ec_response_rollback_info { /* Issue AP reset */ #define EC_CMD_AP_RESET 0x0125 +/** + * Get the number of peripheral charge ports + */ +#define EC_CMD_PCHG_COUNT 0x0134 + +#define EC_PCHG_MAX_PORTS 8 + +struct ec_response_pchg_count { + uint8_t port_count; +} __ec_align1; + +/** + * Get the status of a peripheral charge port + */ +#define EC_CMD_PCHG 0x0135 + +struct ec_params_pchg { + uint8_t port; +} __ec_align1; + +struct ec_response_pchg { + uint32_t error; /* enum pchg_error */ + uint8_t state; /* enum pchg_state state */ + uint8_t battery_percentage; + uint8_t unused0; + uint8_t unused1; + /* Fields added in version 1 */ + uint32_t fw_version; + uint32_t dropped_event_count; +} __ec_align2; + +enum pchg_state { + /* Charger is reset and not initialized. */ + PCHG_STATE_RESET = 0, + /* Charger is initialized or disabled. */ + PCHG_STATE_INITIALIZED, + /* Charger is enabled and ready to detect a device. */ + PCHG_STATE_ENABLED, + /* Device is in proximity. */ + PCHG_STATE_DETECTED, + /* Device is being charged. */ + PCHG_STATE_CHARGING, + /* Device is fully charged. It implies DETECTED (& not charging). */ + PCHG_STATE_FULL, + /* In download (a.k.a. firmware update) mode */ + PCHG_STATE_DOWNLOAD, + /* In download mode. Ready for receiving data. */ + PCHG_STATE_DOWNLOADING, + /* Device is ready for data communication. 
*/ + PCHG_STATE_CONNECTED, + /* Put no more entry below */ + PCHG_STATE_COUNT, +}; + +#define EC_PCHG_STATE_TEXT { \ + [PCHG_STATE_RESET] = "RESET", \ + [PCHG_STATE_INITIALIZED] = "INITIALIZED", \ + [PCHG_STATE_ENABLED] = "ENABLED", \ + [PCHG_STATE_DETECTED] = "DETECTED", \ + [PCHG_STATE_CHARGING] = "CHARGING", \ + [PCHG_STATE_FULL] = "FULL", \ + [PCHG_STATE_DOWNLOAD] = "DOWNLOAD", \ + [PCHG_STATE_DOWNLOADING] = "DOWNLOADING", \ + [PCHG_STATE_CONNECTED] = "CONNECTED", \ + } + /*****************************************************************************/ /* Voltage regulator controls */ diff --git a/include/linux/platform_data/davinci_asp.h b/include/linux/platform_data/davinci_asp.h index 5d1fb0d78a22..76b13ef67562 100644 --- a/include/linux/platform_data/davinci_asp.h +++ b/include/linux/platform_data/davinci_asp.h @@ -96,6 +96,7 @@ enum { MCASP_VERSION_2, /* DA8xx/OMAPL1x */ MCASP_VERSION_3, /* TI81xx/AM33xx */ MCASP_VERSION_4, /* DRA7xxx */ + MCASP_VERSION_OMAP, /* OMAP4/5 */ }; enum mcbsp_clk_input_pin { diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index b34a094b2258..860ba4bc5ead 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -41,36 +41,39 @@ struct dw_dma_slave { /** * struct dw_dma_platform_data - Controller configuration parameters + * @nr_masters: Number of AHB masters supported by the controller * @nr_channels: Number of channels supported by hardware (max 8) * @chan_allocation_order: Allocate channels starting from 0 or 7 * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. * @block_size: Maximum block size supported by the controller - * @nr_masters: Number of AHB masters supported by the controller * @data_width: Maximum data width supported by hardware per AHB master * (in bytes, power of 2) * @multi_block: Multi block transfers supported by hardware per channel. * @max_burst: Maximum value of burst transaction size supported by hardware * per channel (in units of CTL.SRC_TR_WIDTH/CTL.DST_TR_WIDTH). * @protctl: Protection control signals setting per channel. + * @quirks: Optional platform quirks. */ struct dw_dma_platform_data { - unsigned int nr_channels; + u32 nr_masters; + u32 nr_channels; #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ - unsigned char chan_allocation_order; + u32 chan_allocation_order; #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ - unsigned char chan_priority; - unsigned int block_size; - unsigned char nr_masters; - unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; - unsigned char multi_block[DW_DMA_MAX_NR_CHANNELS]; + u32 chan_priority; + u32 block_size; + u32 data_width[DW_DMA_MAX_NR_MASTERS]; + u32 multi_block[DW_DMA_MAX_NR_CHANNELS]; u32 max_burst[DW_DMA_MAX_NR_CHANNELS]; #define CHAN_PROTCTL_PRIVILEGED BIT(0) #define CHAN_PROTCTL_BUFFERABLE BIT(1) #define CHAN_PROTCTL_CACHEABLE BIT(2) #define CHAN_PROTCTL_MASK GENMASK(2, 0) - unsigned char protctl; + u32 protctl; +#define DW_DMA_QUIRK_XBAR_PRESENT BIT(0) + u32 quirks; }; #endif /* _PLATFORM_DATA_DMA_DW_H */ diff --git a/include/linux/platform_data/gpio-dwapb.h b/include/linux/platform_data/gpio-dwapb.h deleted file mode 100644 index 0aa5c6720259..000000000000 --- a/include/linux/platform_data/gpio-dwapb.h +++ /dev/null @@ -1,25 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright(c) 2014 Intel Corporation. 
- */ - -#ifndef GPIO_DW_APB_H -#define GPIO_DW_APB_H - -#define DWAPB_MAX_GPIOS 32 - -struct dwapb_port_property { - struct fwnode_handle *fwnode; - unsigned int idx; - unsigned int ngpio; - unsigned int gpio_base; - int irq[DWAPB_MAX_GPIOS]; - bool irq_shared; -}; - -struct dwapb_platform_data { - struct dwapb_port_property *properties; - unsigned int nports; -}; - -#endif diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h deleted file mode 100644 index cba1184b364c..000000000000 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2010 Wolfram Sang <kernel@pengutronix.de> - */ - -#ifndef __ASM_ARCH_IMX_ESDHC_H -#define __ASM_ARCH_IMX_ESDHC_H - -#include <linux/types.h> - -enum wp_types { - ESDHC_WP_NONE, /* no WP, neither controller nor gpio */ - ESDHC_WP_CONTROLLER, /* mmc controller internal WP */ - ESDHC_WP_GPIO, /* external gpio pin for WP */ -}; - -enum cd_types { - ESDHC_CD_NONE, /* no CD, neither controller nor gpio */ - ESDHC_CD_CONTROLLER, /* mmc controller internal CD */ - ESDHC_CD_GPIO, /* external gpio pin for CD */ - ESDHC_CD_PERMANENT, /* no CD, card permanently wired to host */ -}; - -/** - * struct esdhc_platform_data - platform data for esdhc on i.MX - * - * ESDHC_WP(CD)_CONTROLLER type is not available on i.MX25/35. - * - * @wp_type: type of write_protect method (see wp_types enum above) - * @cd_type: type of card_detect method (see cd_types enum above) - */ - -struct esdhc_platform_data { - enum wp_types wp_type; - enum cd_types cd_type; - int max_bus_width; - unsigned int delay_line; - unsigned int tuning_step; /* The delay cell steps in tuning procedure */ - unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */ - unsigned int strobe_dll_delay_target; /* The delay cell for strobe pad (read clock) */ -}; -#endif /* __ASM_ARCH_IMX_ESDHC_H */ diff --git a/include/linux/platform_data/pata_ixp4xx_cf.h b/include/linux/platform_data/pata_ixp4xx_cf.h index 601ba97fef57..e60fa41da4a5 100644 --- a/include/linux/platform_data/pata_ixp4xx_cf.h +++ b/include/linux/platform_data/pata_ixp4xx_cf.h @@ -14,8 +14,8 @@ struct ixp4xx_pata_data { volatile u32 *cs1_cfg; unsigned long cs0_bits; unsigned long cs1_bits; - void __iomem *cs0; - void __iomem *cs1; + void __iomem *cmd; + void __iomem *ctl; }; #endif diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h index 65fd5ffd257c..f0db674f07b8 100644 --- a/include/linux/platform_data/spi-mt65xx.h +++ b/include/linux/platform_data/spi-mt65xx.h @@ -12,5 +12,6 @@ /* Board specific platform_data */ struct mtk_chip_config { u32 sample_sel; + u32 tick_delay; }; #endif diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 2f274cf52805..17dc5cb6f3f2 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -61,6 +61,7 @@ #define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075 /* Misc */ +#define ASUS_WMI_DEVID_PANEL_OD 0x00050019 #define ASUS_WMI_DEVID_CAMERA 0x00060013 #define ASUS_WMI_DEVID_LID_FLIP 0x00060062 @@ -89,6 +90,12 @@ /* Keyboard dock */ #define ASUS_WMI_DEVID_KBD_DOCK 0x00120063 +/* dgpu on/off */ +#define ASUS_WMI_DEVID_EGPU 0x00090019 + +/* dgpu on/off */ +#define ASUS_WMI_DEVID_DGPU 0x00090020 + /* DSTS masks */ #define ASUS_WMI_DSTS_STATUS_BIT 0x00000001 #define ASUS_WMI_DSTS_UNKNOWN_BIT 0x00000002 diff --git 
a/include/linux/platform_data/x86/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h index 207e1a317800..41df326583f9 100644 --- a/include/linux/platform_data/x86/clk-lpss.h +++ b/include/linux/platform_data/x86/clk-lpss.h @@ -15,6 +15,6 @@ struct lpss_clk_data { struct clk *clk; }; -extern int lpt_clk_init(void); +extern int lpss_atom_clk_init(void); #endif /* __CLK_LPSS_H */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index ed42ea9f60ba..7c96f169d274 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -197,8 +197,6 @@ extern int platform_device_add_resources(struct platform_device *pdev, unsigned int num); extern int platform_device_add_data(struct platform_device *pdev, const void *data, size_t size); -extern int platform_device_add_properties(struct platform_device *pdev, - const struct property_entry *properties); extern int platform_device_add(struct platform_device *pdev); extern void platform_device_del(struct platform_device *pdev); extern void platform_device_put(struct platform_device *pdev); diff --git a/include/linux/pm_clock.h b/include/linux/pm_clock.h index 8ddc7860e131..ada3a0ab10bf 100644 --- a/include/linux/pm_clock.h +++ b/include/linux/pm_clock.h @@ -47,6 +47,7 @@ extern void pm_clk_remove(struct device *dev, const char *con_id); extern void pm_clk_remove_clk(struct device *dev, struct clk *clk); extern int pm_clk_suspend(struct device *dev); extern int pm_clk_resume(struct device *dev); +extern int devm_pm_clk_create(struct device *dev); #else static inline bool pm_clk_no_clocks(struct device *dev) { @@ -83,6 +84,10 @@ static inline void pm_clk_remove(struct device *dev, const char *con_id) static inline void pm_clk_remove_clk(struct device *dev, struct clk *clk) { } +static inline int devm_pm_clk_create(struct device *dev) +{ + return -EINVAL; +} #endif #ifdef CONFIG_HAVE_CLK diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 21a0577305ef..67017c9390c8 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -198,6 +198,7 @@ struct generic_pm_domain_data { struct notifier_block *power_nb; int cpu; unsigned int performance_state; + unsigned int default_pstate; unsigned int rpm_pstate; ktime_t next_wakeup; void *data; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index aab8b35e9f8a..222da43b7096 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -59,6 +59,8 @@ extern void pm_runtime_put_suppliers(struct device *dev); extern void pm_runtime_new_link(struct device *dev); extern void pm_runtime_drop_link(struct device_link *link); +extern int devm_pm_runtime_enable(struct device *dev); + /** * pm_runtime_get_if_in_use - Conditionally bump up runtime PM usage counter. * @dev: Target device. 
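The new devm_pm_runtime_enable() declared above pairs pm_runtime_enable() with an automatic pm_runtime_disable() on driver unbind; a minimal probe() sketch (foo_probe() is hypothetical, not from this diff):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* enables runtime PM; the matching disable runs on unbind */
	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	return 0;
}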
@@ -253,6 +255,8 @@ static inline void __pm_runtime_disable(struct device *dev, bool c) {} static inline void pm_runtime_allow(struct device *dev) {} static inline void pm_runtime_forbid(struct device *dev) {} +static inline int devm_pm_runtime_enable(struct device *dev) { return 0; } + static inline void pm_suspend_ignore_children(struct device *dev, bool enable) {} static inline void pm_runtime_get_noresume(struct device *dev) {} static inline void pm_runtime_put_noidle(struct device *dev) {} diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 896c16d2c5fb..00fef0064355 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -82,12 +82,19 @@ static inline bool cpu_timer_enqueue(struct timerqueue_head *head, return timerqueue_add(head, &ctmr->node); } -static inline void cpu_timer_dequeue(struct cpu_timer *ctmr) +static inline bool cpu_timer_queued(struct cpu_timer *ctmr) { - if (ctmr->head) { + return !!ctmr->head; +} + +static inline bool cpu_timer_dequeue(struct cpu_timer *ctmr) +{ + if (cpu_timer_queued(ctmr)) { timerqueue_del(ctmr->head, &ctmr->node); ctmr->head = NULL; + return true; } + return false; } static inline u64 cpu_timer_getexpires(struct cpu_timer *ctmr) diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index 307094ebb88c..b65c877d92b8 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h @@ -72,6 +72,8 @@ extern struct posix_acl *get_posix_acl(struct inode *, int); extern int set_posix_acl(struct user_namespace *, struct inode *, int, struct posix_acl *); +struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); + #ifdef CONFIG_FS_POSIX_ACL int posix_acl_chmod(struct user_namespace *, struct inode *, umode_t); extern int posix_acl_create(struct inode *, umode_t *, struct posix_acl **, @@ -84,7 +86,6 @@ extern int simple_set_acl(struct user_namespace *, struct inode *, extern int simple_acl_create(struct inode *, struct inode *); struct posix_acl *get_cached_acl(struct inode *inode, int type); -struct posix_acl *get_cached_acl_rcu(struct inode *inode, int type); void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl); void forget_cached_acl(struct inode *inode, int type); void forget_all_cached_acls(struct inode *inode); diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h index d55c746ac56e..dd24756a8af7 100644 --- a/include/linux/power/max17042_battery.h +++ b/include/linux/power/max17042_battery.h @@ -69,7 +69,7 @@ enum max17042_register { MAX17042_RelaxCFG = 0x2A, MAX17042_MiscCFG = 0x2B, MAX17042_TGAIN = 0x2C, - MAx17042_TOFF = 0x2D, + MAX17042_TOFF = 0x2D, MAX17042_CGAIN = 0x2E, MAX17042_COFF = 0x2F, @@ -110,13 +110,14 @@ enum max17042_register { MAX17042_VFSOC = 0xFF, }; +/* Registers specific to max17055 only */ enum max17055_register { MAX17055_QRes = 0x0C, + MAX17055_RCell = 0x14, MAX17055_TTF = 0x20, - MAX17055_V_empty = 0x3A, - MAX17055_TIMER = 0x3E, + MAX17055_DieTemp = 0x34, MAX17055_USER_MEM = 0x40, - MAX17055_RGAIN = 0x42, + MAX17055_RGAIN = 0x43, MAX17055_ConvgCfg = 0x49, MAX17055_VFRemCap = 0x4A, @@ -155,13 +156,14 @@ enum max17055_register { MAX17055_AtAvCap = 0xDF, }; -/* Registers specific to max17047/50 */ +/* Registers specific to max17047/50/55 */ enum max17047_register { MAX17047_QRTbl00 = 0x12, MAX17047_FullSOCThr = 0x13, MAX17047_QRTbl10 = 0x22, MAX17047_QRTbl20 = 0x32, MAX17047_V_empty = 0x3A, + MAX17047_TIMER = 0x3E, MAX17047_QRTbl30 = 0x42, }; diff --git a/include/linux/power/smartreflex.h 
b/include/linux/power/smartreflex.h index 971c9264179e..167b9b040091 100644 --- a/include/linux/power/smartreflex.h +++ b/include/linux/power/smartreflex.h @@ -155,6 +155,7 @@ struct omap_sr { struct voltagedomain *voltdm; struct dentry *dbg_dir; unsigned int irq; + struct clk *fck; int srid; int ip_type; int nvalue_count; @@ -169,6 +170,7 @@ struct omap_sr { u32 senp_mod; u32 senn_mod; void __iomem *base; + unsigned long enabled:1; }; /** diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index be203985ecdd..9ca1f120a211 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -352,6 +352,7 @@ struct power_supply_resistance_temp_table { */ struct power_supply_battery_info { + unsigned int technology; /* from the enum above */ int energy_full_design_uwh; /* microWatt-hours */ int charge_full_design_uah; /* microAmp-hours */ int voltage_min_design_uv; /* microVolts */ diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 9881eac0698f..4d244e295e85 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -121,7 +121,11 @@ /* * The preempt_count offset after spin_lock() */ +#if !defined(CONFIG_PREEMPT_RT) #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET +#else +#define PREEMPT_LOCK_OFFSET 0 +#endif /* * The preempt_count offset needed for things like: diff --git a/include/linux/printk.h b/include/linux/printk.h index e834d78f0478..85b656f82d75 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -2,7 +2,7 @@ #ifndef __KERNEL_PRINTK__ #define __KERNEL_PRINTK__ -#include <stdarg.h> +#include <linux/stdarg.h> #include <linux/init.h> #include <linux/kern_levels.h> #include <linux/linkage.h> @@ -70,16 +70,7 @@ extern int console_printk[]; #define minimum_console_loglevel (console_printk[2]) #define default_console_loglevel (console_printk[3]) -static inline void console_silent(void) -{ - console_loglevel = CONSOLE_LOGLEVEL_SILENT; -} - -static inline void console_verbose(void) -{ - if (console_loglevel) - console_loglevel = CONSOLE_LOGLEVEL_MOTORMOUTH; -} +extern void console_verbose(void); /* strlen("ratelimit") + 1 */ #define DEVKMSG_STR_MAX_SIZE 10 @@ -150,18 +141,6 @@ static inline __printf(1, 2) __cold void early_printk(const char *s, ...) { } #endif -#ifdef CONFIG_PRINTK_NMI -extern void printk_nmi_enter(void); -extern void printk_nmi_exit(void); -extern void printk_nmi_direct_enter(void); -extern void printk_nmi_direct_exit(void); -#else -static inline void printk_nmi_enter(void) { } -static inline void printk_nmi_exit(void) { } -static inline void printk_nmi_direct_enter(void) { } -static inline void printk_nmi_direct_exit(void) { } -#endif /* PRINTK_NMI */ - struct dev_printk_info; #ifdef CONFIG_PRINTK @@ -174,12 +153,22 @@ asmlinkage __printf(1, 0) int vprintk(const char *fmt, va_list args); asmlinkage __printf(1, 2) __cold -int printk(const char *fmt, ...); +int _printk(const char *fmt, ...); /* * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ ! */ -__printf(1, 2) __cold int printk_deferred(const char *fmt, ...); +__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...); + +extern void __printk_safe_enter(void); +extern void __printk_safe_exit(void); +/* + * The printk_deferred_enter/exit macros are available only as a hack for + * some code paths that need to defer all printk console printing. Interrupts + * must be disabled for the deferred duration. 
+ */ +#define printk_deferred_enter __printk_safe_enter +#define printk_deferred_exit __printk_safe_exit /* * Please don't use printk_ratelimit(), because it shares ratelimiting state @@ -209,8 +198,6 @@ void dump_stack_print_info(const char *log_lvl); void show_regs_print_info(const char *log_lvl); extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold; extern asmlinkage void dump_stack(void) __cold; -extern void printk_safe_flush(void); -extern void printk_safe_flush_on_panic(void); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) @@ -218,15 +205,24 @@ int vprintk(const char *s, va_list args) return 0; } static inline __printf(1, 2) __cold -int printk(const char *s, ...) +int _printk(const char *s, ...) { return 0; } static inline __printf(1, 2) __cold -int printk_deferred(const char *s, ...) +int _printk_deferred(const char *s, ...) { return 0; } + +static inline void printk_deferred_enter(void) +{ +} + +static inline void printk_deferred_exit(void) +{ +} + static inline int printk_ratelimit(void) { return 0; @@ -278,14 +274,6 @@ static inline void dump_stack_lvl(const char *log_lvl) static inline void dump_stack(void) { } - -static inline void printk_safe_flush(void) -{ -} - -static inline void printk_safe_flush_on_panic(void) -{ -} #endif #ifdef CONFIG_SMP @@ -348,6 +336,117 @@ extern int kptr_restrict; #define pr_fmt(fmt) fmt #endif +struct module; + +#ifdef CONFIG_PRINTK_INDEX +struct pi_entry { + const char *fmt; + const char *func; + const char *file; + unsigned int line; + + /* + * While printk and pr_* have the level stored in the string at compile + * time, some subsystems dynamically add it at runtime through the + * format string. For these dynamic cases, we allow the subsystem to + * tell us the level at compile time. + * + * NULL indicates that the level, if any, is stored in fmt. + */ + const char *level; + + /* + * The format string used by various subsystem specific printk() + * wrappers to prefix the message. + * + * Note that the static prefix defined by the pr_fmt() macro is stored + * directly in the message format (@fmt), not here. + */ + const char *subsys_fmt_prefix; +} __packed; + +#define __printk_index_emit(_fmt, _level, _subsys_fmt_prefix) \ + do { \ + if (__builtin_constant_p(_fmt) && __builtin_constant_p(_level)) { \ + /* + * We check __builtin_constant_p multiple times here + * for the same input because GCC will produce an error + * if we try to assign a static variable to fmt if it + * is not a constant, even with the outer if statement. + */ \ + static const struct pi_entry _entry \ + __used = { \ + .fmt = __builtin_constant_p(_fmt) ? (_fmt) : NULL, \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + .level = __builtin_constant_p(_level) ? (_level) : NULL, \ + .subsys_fmt_prefix = _subsys_fmt_prefix,\ + }; \ + static const struct pi_entry *_entry_ptr \ + __used __section(".printk_index") = &_entry; \ + } \ + } while (0) + +#else /* !CONFIG_PRINTK_INDEX */ +#define __printk_index_emit(...) do {} while (0) +#endif /* CONFIG_PRINTK_INDEX */ + +/* + * Some subsystems have their own custom printk that applies a va_format to a + * generic format, for example, to include a device number or other metadata + * alongside the format supplied by the caller. + * + * In order to store these in the way they would be emitted by the printk + * infrastructure, the subsystem provides us with the start, fixed string, and + * any subsequent text in the format string. 
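A minimal sketch of the printk_deferred_enter()/printk_deferred_exit() pair defined above (illustrative only; the function name and message are hypothetical). As the comment requires, interrupts stay disabled across the deferred window:

#include <linux/printk.h>
#include <linux/irqflags.h>

static void foo_dump_under_unsafe_lock(void)
{
	unsigned long flags;

	local_irq_save(flags);
	printk_deferred_enter();
	/* messages are logged, but console printing is deferred */
	pr_warn("foo: dumping state from a console-unsafe context\n");
	printk_deferred_exit();
	local_irq_restore(flags);
}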
+ * + * We take a variable argument list as pr_fmt/dev_fmt/etc are sometimes passed + * as multiple arguments (eg: `"%s: ", "blah"`), and we must only take the + * first one. + * + * subsys_fmt_prefix must be known at compile time, or compilation will fail + * (since this is a mistake). If fmt or level is not known at compile time, no + * index entry will be made (since this can legitimately happen). + */ +#define printk_index_subsys_emit(subsys_fmt_prefix, level, fmt, ...) \ + __printk_index_emit(fmt, level, subsys_fmt_prefix) + +#define printk_index_wrap(_p_func, _fmt, ...) \ + ({ \ + __printk_index_emit(_fmt, NULL, NULL); \ + _p_func(_fmt, ##__VA_ARGS__); \ + }) + + +/** + * printk - print a kernel message + * @fmt: format string + * + * This is printk(). It can be called from any context. We want it to work. + * + * If printk indexing is enabled, _printk() is called from printk_index_wrap. + * Otherwise, printk is simply #defined to _printk. + * + * We try to grab the console_lock. If we succeed, it's easy - we log the + * output and call the console drivers. If we fail to get the semaphore, we + * place the output into the log buffer and return. The current holder of + * the console_sem will notice the new output in console_unlock(); and will + * send it to the consoles before releasing the lock. + * + * One effect of this deferred printing is that code which calls printk() and + * then changes console_loglevel may break. This is because console_loglevel + * is inspected when the actual printing occurs. + * + * See also: + * printf(3) + * + * See the vsnprintf() documentation for format string extensions over C99. + */ +#define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__) +#define printk_deferred(fmt, ...) \ + printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__) + /** * pr_emerg - Print an emergency-level message * @fmt: format string diff --git a/include/linux/property.h b/include/linux/property.h index 073e680c35e2..357513a977e5 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -484,8 +484,6 @@ void software_node_unregister_node_group(const struct software_node **node_group int software_node_register(const struct software_node *node); void software_node_unregister(const struct software_node *node); -int software_node_notify(struct device *dev, unsigned long action); - struct fwnode_handle * fwnode_create_software_node(const struct property_entry *properties, const struct fwnode_handle *parent); diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 71fac9237725..2e5565067355 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -215,7 +215,7 @@ static inline long scaled_ppm_to_ppb(long ppm) return (long)ppb; } -#if IS_REACHABLE(CONFIG_PTP_1588_CLOCK) +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK) /** * ptp_clock_register() - register a PTP hardware clock driver @@ -307,6 +307,33 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay); */ void ptp_cancel_worker_sync(struct ptp_clock *ptp); +#else +static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, + struct device *parent) +{ return NULL; } +static inline int ptp_clock_unregister(struct ptp_clock *ptp) +{ return 0; } +static inline void ptp_clock_event(struct ptp_clock *ptp, + struct ptp_clock_event *event) +{ } +static inline int ptp_clock_index(struct ptp_clock *ptp) +{ return -1; } +static inline int ptp_find_pin(struct ptp_clock *ptp, + enum ptp_pin_function func, unsigned int chan) +{ 
return -1; } +static inline int ptp_schedule_worker(struct ptp_clock *ptp, + unsigned long delay) +{ return -EOPNOTSUPP; } +static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) +{ } +#endif + +#if IS_BUILTIN(CONFIG_PTP_1588_CLOCK) +/* + * These are called by the network core, and don't work if PTP is in + * a loadable module. + */ + /** * ptp_get_vclocks_index() - get all vclocks index on pclock, and * caller is responsible to free memory @@ -327,26 +354,7 @@ int ptp_get_vclocks_index(int pclock_index, int **vclock_index); */ void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, int vclock_index); - #else -static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, - struct device *parent) -{ return NULL; } -static inline int ptp_clock_unregister(struct ptp_clock *ptp) -{ return 0; } -static inline void ptp_clock_event(struct ptp_clock *ptp, - struct ptp_clock_event *event) -{ } -static inline int ptp_clock_index(struct ptp_clock *ptp) -{ return -1; } -static inline int ptp_find_pin(struct ptp_clock *ptp, - enum ptp_pin_function func, unsigned int chan) -{ return -1; } -static inline int ptp_schedule_worker(struct ptp_clock *ptp, - unsigned long delay) -{ return -EOPNOTSUPP; } -static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) -{ } static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) { return 0; } static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, diff --git a/include/linux/pwm.h b/include/linux/pwm.h index a0b7e43049d5..725c9b784e60 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -404,7 +404,7 @@ int pwm_set_chip_data(struct pwm_device *pwm, void *data); void *pwm_get_chip_data(struct pwm_device *pwm); int pwmchip_add(struct pwm_chip *chip); -int pwmchip_remove(struct pwm_chip *chip); +void pwmchip_remove(struct pwm_chip *chip); int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip); diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h index 7c811eebcaab..f5672785c0c4 100644 --- a/include/linux/qcom-geni-se.h +++ b/include/linux/qcom-geni-se.h @@ -8,11 +8,24 @@ #include <linux/interconnect.h> -/* Transfer mode supported by GENI Serial Engines */ +/** + * enum geni_se_xfer_mode: Transfer modes supported by Serial Engines + * + * @GENI_SE_INVALID: Invalid mode + * @GENI_SE_FIFO: FIFO mode. Data is transferred with SE FIFO + * by programmed IO method + * @GENI_SE_DMA: Serial Engine DMA mode. Data is transferred + * with SE by DMAengine internal to SE + * @GENI_GPI_DMA: GPI DMA mode. Data is transferred using a DMAengine + * configured by a firmware residing on a GSI engine. 
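One knock-on effect worth illustrating from the pwm.h hunk above: pwmchip_remove() no longer returns an error, so a driver's remove path simply calls it unconditionally (sketch only; foo_remove() and the drvdata layout are hypothetical):

#include <linux/platform_device.h>
#include <linux/pwm.h>

static int foo_remove(struct platform_device *pdev)
{
	struct pwm_chip *chip = platform_get_drvdata(pdev);

	pwmchip_remove(chip);	/* now returns void; nothing to check */
	return 0;
}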
This DMA name is + * interchangeably used as GSI or GPI which seem to imply the same DMAengine + */ + enum geni_se_xfer_mode { GENI_SE_INVALID, GENI_SE_FIFO, GENI_SE_DMA, + GENI_GPI_DMA, }; /* Protocols supported by GENI Serial Engines */ @@ -63,6 +76,7 @@ struct geni_se { #define SE_GENI_STATUS 0x40 #define GENI_SER_M_CLK_CFG 0x48 #define GENI_SER_S_CLK_CFG 0x4c +#define GENI_IF_DISABLE_RO 0x64 #define GENI_FW_REVISION_RO 0x68 #define SE_GENI_CLK_SEL 0x7c #define SE_GENI_DMA_MODE_EN 0x258 @@ -105,6 +119,9 @@ struct geni_se { #define CLK_DIV_MSK GENMASK(15, 4) #define CLK_DIV_SHFT 4 +/* GENI_IF_DISABLE_RO fields */ +#define FIFO_IF_DISABLE (BIT(0)) + /* GENI_FW_REVISION_RO fields */ #define FW_REV_PROTOCOL_MSK GENMASK(15, 8) #define FW_REV_PROTOCOL_SHFT 8 diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 0165824c5128..c0475d1c9885 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -109,6 +109,12 @@ extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp); extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en); + +extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, + u64 limit_node, u32 node_id, u64 version); +extern int qcom_scm_lmh_profile_change(u32 profile_id); +extern bool qcom_scm_lmh_dcvsh_available(void); + #else #include <linux/errno.h> @@ -170,5 +176,13 @@ static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en) { return -ENODEV; } + +static inline int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val, + u64 limit_node, u32 node_id, u64 version) + { return -ENODEV; } + +static inline int qcom_scm_lmh_profile_change(u32 profile_id) { return -ENODEV; } + +static inline bool qcom_scm_lmh_dcvsh_available(void) { return -ENODEV; } #endif #endif diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index d31ecaf4fdd3..235047d7a1b5 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -17,24 +17,14 @@ #ifndef _LINUX_RBTREE_H #define _LINUX_RBTREE_H +#include <linux/rbtree_types.h> + #include <linux/kernel.h> #include <linux/stddef.h> #include <linux/rcupdate.h> -struct rb_node { - unsigned long __rb_parent_color; - struct rb_node *rb_right; - struct rb_node *rb_left; -} __attribute__((aligned(sizeof(long)))); - /* The alignment might seem pointless, but allegedly CRIS needs it */ - -struct rb_root { - struct rb_node *rb_node; -}; - #define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3)) -#define RB_ROOT (struct rb_root) { NULL, } #define rb_entry(ptr, type, member) container_of(ptr, type, member) #define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL) @@ -112,23 +102,6 @@ static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent typeof(*pos), field); 1; }); \ pos = n) -/* - * Leftmost-cached rbtrees. - * - * We do not cache the rightmost node based on footprint - * size vs number of potential users that could benefit - * from O(1) rb_last(). Just not worth it, users that want - * this feature can always implement the logic explicitly. - * Furthermore, users that want to cache both pointers may - * find it a bit asymmetric, but that's ok. 
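With rb_node/rb_root/rb_root_cached moving out of rbtree.h into the new rbtree_types.h added below, a header that only embeds the types can include the lighter file; code that calls the rb_* helpers still needs <linux/rbtree.h>. A sketch (struct foo_index is hypothetical):

#include <linux/rbtree_types.h>	/* types and initializers only, no helpers */

struct foo_index {
	struct rb_root_cached root;
	unsigned long nr_entries;
};

static struct foo_index foo_idx = {
	.root = RB_ROOT_CACHED,
};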
- */ -struct rb_root_cached { - struct rb_root rb_root; - struct rb_node *rb_leftmost; -}; - -#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } - /* Same as rb_first(), but O(1) */ #define rb_first_cached(root) (root)->rb_leftmost diff --git a/include/linux/rbtree_types.h b/include/linux/rbtree_types.h new file mode 100644 index 000000000000..45b6ecde3665 --- /dev/null +++ b/include/linux/rbtree_types.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_RBTREE_TYPES_H +#define _LINUX_RBTREE_TYPES_H + +struct rb_node { + unsigned long __rb_parent_color; + struct rb_node *rb_right; + struct rb_node *rb_left; +} __attribute__((aligned(sizeof(long)))); +/* The alignment might seem pointless, but allegedly CRIS needs it */ + +struct rb_root { + struct rb_node *rb_node; +}; + +/* + * Leftmost-cached rbtrees. + * + * We do not cache the rightmost node based on footprint + * size vs number of potential users that could benefit + * from O(1) rb_last(). Just not worth it, users that want + * this feature can always implement the logic explicitly. + * Furthermore, users that want to cache both pointers may + * find it a bit asymmetric, but that's ok. + */ +struct rb_root_cached { + struct rb_root rb_root; + struct rb_node *rb_leftmost; +}; + +#define RB_ROOT (struct rb_root) { NULL, } +#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL } + +#endif diff --git a/include/linux/rculist.h b/include/linux/rculist.h index f8633d37e358..d29740be4833 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -11,15 +11,6 @@ #include <linux/rcupdate.h> /* - * Why is there no list_empty_rcu()? Because list_empty() serves this - * purpose. The list_empty() function fetches the RCU-protected pointer - * and compares it to the address of the list head, but neither dereferences - * this pointer itself nor provides this pointer to the caller. Therefore, - * it is not necessary to use rcu_dereference(), so that list_empty() can - * be used anywhere you would want to use a list_empty_rcu(). - */ - -/* * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers * @list: list to be initialized * @@ -318,21 +309,29 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, /* * Where are list_empty_rcu() and list_first_entry_rcu()? * - * Implementing those functions following their counterparts list_empty() and - * list_first_entry() is not advisable because they lead to subtle race - * conditions as the following snippet shows: + * They do not exist because they would lead to subtle race conditions: * * if (!list_empty_rcu(mylist)) { * struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member); * do_something(bar); * } * - * The list may not be empty when list_empty_rcu checks it, but it may be when - * list_first_entry_rcu rereads the ->next pointer. - * - * Rereading the ->next pointer is not a problem for list_empty() and - * list_first_entry() because they would be protected by a lock that blocks - * writers. + * The list might be non-empty when list_empty_rcu() checks it, but it + * might have become empty by the time that list_first_entry_rcu() rereads + * the ->next pointer, which would result in a SEGV. + * + * When not using RCU, it is OK for list_first_entry() to re-read that + * pointer because both functions should be protected by some lock that + * blocks writers. 
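struct rb_root_cached and RB_ROOT_CACHED now live in rbtree_types.h, while rbtree.h keeps helpers such as rb_first_cached(). A short sketch of the cached-leftmost insert pattern those definitions support (the demo structure and key are illustrative only):

	struct demo_node {
		struct rb_node rb;
		unsigned long key;
	};

	static struct rb_root_cached demo_tree = RB_ROOT_CACHED;

	static void demo_insert(struct demo_node *new)
	{
		struct rb_node **link = &demo_tree.rb_root.rb_node, *parent = NULL;
		bool leftmost = true;

		while (*link) {
			struct demo_node *cur = rb_entry(*link, struct demo_node, rb);

			parent = *link;
			if (new->key < cur->key) {
				link = &parent->rb_left;
			} else {
				link = &parent->rb_right;
				leftmost = false;
			}
		}
		rb_link_node(&new->rb, parent, link);
		rb_insert_color_cached(&new->rb, &demo_tree, leftmost);
	}

After this, rb_first_cached(&demo_tree) returns the smallest node in O(1), as the comment being moved explains.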
+ * + * When using RCU, list_empty() uses READ_ONCE() to fetch the + * RCU-protected ->next pointer and then compares it to the address of the + * list head. However, it neither dereferences this pointer nor provides + * this pointer to its caller. Thus, READ_ONCE() suffices (that is, + * rcu_dereference() is not needed), which means that list_empty() can be + * used anywhere you would want to use list_empty_rcu(). Just don't + * expect anything useful to happen if you do a subsequent lockless + * call to list_first_entry_rcu()!!! * * See list_first_or_null_rcu for an alternative. */ diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index d9680b798b21..434d12fe2d4f 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -53,7 +53,7 @@ void __rcu_read_unlock(void); * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ -#define rcu_preempt_depth() (current->rcu_read_lock_nesting) +#define rcu_preempt_depth() READ_ONCE(current->rcu_read_lock_nesting) #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -167,7 +167,7 @@ void synchronize_rcu_tasks(void); # define synchronize_rcu_tasks synchronize_rcu # endif -# ifdef CONFIG_TASKS_RCU_TRACE +# ifdef CONFIG_TASKS_TRACE_RCU # define rcu_tasks_trace_qs(t) \ do { \ if (!likely(READ_ONCE((t)->trc_reader_checked)) && \ diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 953e70fafe38..9be015305f9f 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -14,9 +14,6 @@ #include <asm/param.h> /* for HZ */ -/* Never flag non-existent other CPUs! */ -static inline bool rcu_eqs_special_set(int cpu) { return false; } - unsigned long get_state_synchronize_rcu(void); unsigned long start_poll_synchronize_rcu(void); bool poll_state_synchronize_rcu(unsigned long oldstate); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index f5f08dd0a116..e3c9a25a853a 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -344,6 +344,7 @@ typedef void (*regmap_unlock)(void *); * @ranges: Array of configuration entries for virtual address ranges. * @num_ranges: Number of range configuration entries. * @use_hwlock: Indicate if a hardware spinlock should be used. + * @use_raw_spinlock: Indicate if a raw spinlock should be used. * @hwlock_id: Specify the hardware spinlock id. * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, * HWLOCK_IRQ or 0. 
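The rewritten rculist.h comment above ends by pointing at list_first_or_null_rcu() as the safe alternative. Reusing the comment's own hypothetical names (mylist, struct foo, list_member, do_something), the race-free version fetches the first entry exactly once:

	struct foo *bar;

	rcu_read_lock();
	bar = list_first_or_null_rcu(mylist, struct foo, list_member);
	if (bar)
		do_something(bar);
	rcu_read_unlock();

Because the emptiness check and the entry fetch are one operation, there is no window in which the list can become empty between them.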
@@ -403,6 +404,7 @@ struct regmap_config { unsigned int num_ranges; bool use_hwlock; + bool use_raw_spinlock; unsigned int hwlock_id; unsigned int hwlock_mode; @@ -1269,12 +1271,13 @@ void devm_regmap_field_free(struct device *dev, struct regmap_field *field); int regmap_field_bulk_alloc(struct regmap *regmap, struct regmap_field **rm_field, - struct reg_field *reg_field, + const struct reg_field *reg_field, int num_fields); void regmap_field_bulk_free(struct regmap_field *field); int devm_regmap_field_bulk_alloc(struct device *dev, struct regmap *regmap, struct regmap_field **field, - struct reg_field *reg_field, int num_fields); + const struct reg_field *reg_field, + int num_fields); void devm_regmap_field_bulk_free(struct device *dev, struct regmap_field *field); diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index f72ca73631be..bbf6590a6dec 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -222,17 +222,12 @@ void regulator_bulk_unregister_supply_alias(struct device *dev, int devm_regulator_register_supply_alias(struct device *dev, const char *id, struct device *alias_dev, const char *alias_id); -void devm_regulator_unregister_supply_alias(struct device *dev, - const char *id); int devm_regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, const char *const *alias_id, int num_id); -void devm_regulator_bulk_unregister_supply_alias(struct device *dev, - const char *const *id, - int num_id); /* regulator output control and status */ int __must_check regulator_enable(struct regulator *regulator); @@ -408,11 +403,6 @@ static inline int devm_regulator_register_supply_alias(struct device *dev, return 0; } -static inline void devm_regulator_unregister_supply_alias(struct device *dev, - const char *id) -{ -} - static inline int devm_regulator_bulk_register_supply_alias(struct device *dev, const char *const *id, struct device *alias_dev, @@ -422,11 +412,6 @@ static inline int devm_regulator_bulk_register_supply_alias(struct device *dev, return 0; } -static inline void devm_regulator_bulk_unregister_supply_alias( - struct device *dev, const char *const *id, int num_id) -{ -} - static inline int regulator_enable(struct regulator *regulator) { return 0; diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 4aec20387857..bd7a73db2e66 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h @@ -337,6 +337,12 @@ enum regulator_type { * @pull_down_val_on: Enabling value for control when using regmap * set_pull_down * + * @ramp_reg: Register for controlling the regulator ramp-rate. + * @ramp_mask: Bitmask for the ramp-rate control register. + * @ramp_delay_table: Table for mapping the regulator ramp-rate values. Values + * should be given in units of V/S (uV/uS). See the + * regulator_set_ramp_delay_regmap(). + * * @enable_time: Time taken for initial enable of regulator (in uS). * @off_on_delay: guard time (in uS), before re-enabling a regulator * @@ -462,7 +468,7 @@ struct regulator_err_state { }; /** - * struct regulator_irq_data - regulator error/notification status date + * struct regulator_irq_data - regulator error/notification status data * * @states: Status structs for each of the associated regulators. * @num_states: Amount of associated regulators. @@ -521,8 +527,8 @@ struct regulator_irq_data { * active events as core does not clean the map data. 
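With the regmap_field bulk-allocation prototypes above now taking const struct reg_field *, the field descriptor table can live in rodata. A hedged sketch (the register offsets, dev and map are placeholders, not taken from any real driver):

	static const struct reg_field demo_fields[] = {
		REG_FIELD(0x00, 0, 3),	/* hypothetical 4-bit mode field */
		REG_FIELD(0x04, 7, 7),	/* hypothetical enable bit */
	};

	struct regmap_field *fields[ARRAY_SIZE(demo_fields)];
	int ret;

	ret = devm_regmap_field_bulk_alloc(dev, map, fields, demo_fields,
					   ARRAY_SIZE(demo_fields));
	if (ret)
		return ret;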
* REGULATOR_FAILED_RETRY can be returned to indicate that the * status reading from IC failed. If this is repeated for - * fatal_cnt times the core will call die() callback or BUG() - * as a last resort to protect the HW. + * fatal_cnt times the core will call die() callback or power-off + * the system as a last resort to protect the HW. * @renable: Optional callback to check status (if HW supports that) before * re-enabling IRQ. If implemented this should clear the error * flags so that errors fetched by regulator_get_error_flags() @@ -531,7 +537,8 @@ struct regulator_irq_data { * REGULATOR_FAILED_RETRY can be returned to * indicate that the status reading from IC failed. If this is * repeated for 'fatal_cnt' times the core will call die() - * callback or BUG() as a last resort to protect the HW. + * callback or if die() is not populated then attempt to power-off + * the system as a last resort to protect the HW. * Returning zero indicates that the problem in HW has been solved * and IRQ will be re-enabled. Returning REGULATOR_ERROR_ON * indicates the error condition is still active and keeps IRQ @@ -645,7 +652,6 @@ devm_regulator_register(struct device *dev, const struct regulator_desc *regulator_desc, const struct regulator_config *config); void regulator_unregister(struct regulator_dev *rdev); -void devm_regulator_unregister(struct device *dev, struct regulator_dev *rdev); int regulator_notifier_call_chain(struct regulator_dev *rdev, unsigned long event, void *data); diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 68b4a514a410..621b7f4a3639 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h @@ -112,7 +112,7 @@ struct notification_limit { * @over_voltage_limits: Limits for acting on over voltage. * @under_voltage_limits: Limits for acting on under voltage. * @temp_limits: Limits for acting on over temperature. - + * * @max_spread: Max possible spread between coupled regulators * @max_uV_step: Max possible step change in voltage * @valid_modes_mask: Mask of modes which may be configured by consumers. diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index a5b37bc10865..83c09ac36b13 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h @@ -369,9 +369,8 @@ enum rsc_handling_status { * @da_to_va: optional platform hook to perform address translations * @parse_fw: parse firmware to extract information (e.g. resource table) * @handle_rsc: optional platform hook to handle vendor resources. Should return - * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled and a - * negative value on error - * @load_rsc_table: load resource table from firmware image + * RSC_HANDLED if resource was handled, RSC_IGNORED if not handled + * and a negative value on error * @find_loaded_rsc_table: find the loaded resource table from firmware image * @get_loaded_rsc_table: get resource table installed in memory * by external entity diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h index 9b05af9b3e28..21deb5212bbd 100644 --- a/include/linux/resctrl.h +++ b/include/linux/resctrl.h @@ -2,6 +2,8 @@ #ifndef _RESCTRL_H #define _RESCTRL_H +#include <linux/kernel.h> +#include <linux/list.h> #include <linux/pid.h> #ifdef CONFIG_PROC_CPU_RESCTRL @@ -13,4 +15,186 @@ int proc_resctrl_show(struct seq_file *m, #endif +/** + * enum resctrl_conf_type - The type of configuration. + * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. 
+ * @CDP_CODE: Configuration applies to instruction fetches. + * @CDP_DATA: Configuration applies to reads and writes. + */ +enum resctrl_conf_type { + CDP_NONE, + CDP_CODE, + CDP_DATA, +}; + +#define CDP_NUM_TYPES (CDP_DATA + 1) + +/** + * struct resctrl_staged_config - parsed configuration to be applied + * @new_ctrl: new ctrl value to be loaded + * @have_new_ctrl: whether the user provided new_ctrl is valid + */ +struct resctrl_staged_config { + u32 new_ctrl; + bool have_new_ctrl; +}; + +/** + * struct rdt_domain - group of CPUs sharing a resctrl resource + * @list: all instances of this resource + * @id: unique id for this instance + * @cpu_mask: which CPUs share this resource + * @rmid_busy_llc: bitmap of which limbo RMIDs are above threshold + * @mbm_total: saved state for MBM total bandwidth + * @mbm_local: saved state for MBM local bandwidth + * @mbm_over: worker to periodically read MBM h/w counters + * @cqm_limbo: worker to periodically read CQM h/w counters + * @mbm_work_cpu: worker CPU for MBM h/w counters + * @cqm_work_cpu: worker CPU for CQM h/w counters + * @plr: pseudo-locked region (if any) associated with domain + * @staged_config: parsed configuration to be applied + */ +struct rdt_domain { + struct list_head list; + int id; + struct cpumask cpu_mask; + unsigned long *rmid_busy_llc; + struct mbm_state *mbm_total; + struct mbm_state *mbm_local; + struct delayed_work mbm_over; + struct delayed_work cqm_limbo; + int mbm_work_cpu; + int cqm_work_cpu; + struct pseudo_lock_region *plr; + struct resctrl_staged_config staged_config[CDP_NUM_TYPES]; +}; + +/** + * struct resctrl_cache - Cache allocation related data + * @cbm_len: Length of the cache bit mask + * @min_cbm_bits: Minimum number of consecutive bits to be set + * @shareable_bits: Bitmask of shareable resource with other + * executing entities + * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. + * @arch_has_empty_bitmaps: True if the '0' bitmap is valid. + * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache + * level has CPU scope. 
+ */ +struct resctrl_cache { + unsigned int cbm_len; + unsigned int min_cbm_bits; + unsigned int shareable_bits; + bool arch_has_sparse_bitmaps; + bool arch_has_empty_bitmaps; + bool arch_has_per_cpu_cfg; +}; + +/** + * enum membw_throttle_mode - System's memory bandwidth throttling mode + * @THREAD_THROTTLE_UNDEFINED: Not relevant to the system + * @THREAD_THROTTLE_MAX: Memory bandwidth is throttled at the core + * always using smallest bandwidth percentage + * assigned to threads, aka "max throttling" + * @THREAD_THROTTLE_PER_THREAD: Memory bandwidth is throttled at the thread + */ +enum membw_throttle_mode { + THREAD_THROTTLE_UNDEFINED = 0, + THREAD_THROTTLE_MAX, + THREAD_THROTTLE_PER_THREAD, +}; + +/** + * struct resctrl_membw - Memory bandwidth allocation related data + * @min_bw: Minimum memory bandwidth percentage user can request + * @bw_gran: Granularity at which the memory bandwidth is allocated + * @delay_linear: True if memory B/W delay is in linear scale + * @arch_needs_linear: True if we can't configure non-linear resources + * @throttle_mode: Bandwidth throttling mode when threads request + * different memory bandwidths + * @mba_sc: True if MBA software controller(mba_sc) is enabled + * @mb_map: Mapping of memory B/W percentage to memory B/W delay + */ +struct resctrl_membw { + u32 min_bw; + u32 bw_gran; + u32 delay_linear; + bool arch_needs_linear; + enum membw_throttle_mode throttle_mode; + bool mba_sc; + u32 *mb_map; +}; + +struct rdt_parse_data; +struct resctrl_schema; + +/** + * struct rdt_resource - attributes of a resctrl resource + * @rid: The index of the resource + * @alloc_enabled: Is allocation enabled on this machine + * @mon_enabled: Is monitoring enabled for this feature + * @alloc_capable: Is allocation available on this machine + * @mon_capable: Is monitor feature available on this machine + * @num_rmid: Number of RMIDs available + * @cache_level: Which cache level defines scope of this resource + * @cache: Cache allocation related data + * @membw: If the component has bandwidth controls, their properties. + * @domains: All domains for this resource + * @name: Name to use in "schemata" file. + * @data_width: Character width of data when displaying + * @default_ctrl: Specifies default cache cbm or memory B/W percent. + * @format_str: Per resource format string to show domain value + * @parse_ctrlval: Per resource function pointer to parse control values + * @evt_list: List of monitoring events + * @fflags: flags to choose base and info files + * @cdp_capable: Is the CDP feature available on this resource + */ +struct rdt_resource { + int rid; + bool alloc_enabled; + bool mon_enabled; + bool alloc_capable; + bool mon_capable; + int num_rmid; + int cache_level; + struct resctrl_cache cache; + struct resctrl_membw membw; + struct list_head domains; + char *name; + int data_width; + u32 default_ctrl; + const char *format_str; + int (*parse_ctrlval)(struct rdt_parse_data *data, + struct resctrl_schema *s, + struct rdt_domain *d); + struct list_head evt_list; + unsigned long fflags; + bool cdp_capable; +}; + +/** + * struct resctrl_schema - configuration abilities of a resource presented to + * user-space + * @list: Member of resctrl_schema_all. + * @name: The name to use in the "schemata" file. + * @conf_type: Whether this schema is specific to code/data. + * @res: The resource structure exported by the architecture to describe + * the hardware that is configured by this schema. + * @num_closid: The number of closid that can be used with this schema. 
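Taken together, the structures above give the filesystem/architecture split a common shape: each rdt_resource carries a list of rdt_domain instances describing the CPUs that share it. A hedged sketch of walking that list (r is assumed to be a struct rdt_resource provided by the architecture code):

	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list)
		pr_info("%s: domain %d covers CPUs %*pbl\n",
			r->name, d->id, cpumask_pr_args(&d->cpu_mask));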
When + * features like CDP are enabled, this will be lower than the + * hardware supports for the resource. + */ +struct resctrl_schema { + struct list_head list; + char name[8]; + enum resctrl_conf_type conf_type; + struct rdt_resource *res; + u32 num_closid; +}; + +/* The number of closid supported by this resource regardless of CDP */ +u32 resctrl_arch_get_num_closid(struct rdt_resource *r); +int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); +u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type type); + #endif /* _RESCTRL_H */ diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index d1672de9ca89..9deedfeec2b1 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -13,12 +13,39 @@ #ifndef __LINUX_RT_MUTEX_H #define __LINUX_RT_MUTEX_H +#include <linux/compiler.h> #include <linux/linkage.h> -#include <linux/rbtree.h> -#include <linux/spinlock_types.h> +#include <linux/rbtree_types.h> +#include <linux/spinlock_types_raw.h> extern int max_lock_depth; /* for sysctl */ +struct rt_mutex_base { + raw_spinlock_t wait_lock; + struct rb_root_cached waiters; + struct task_struct *owner; +}; + +#define __RT_MUTEX_BASE_INITIALIZER(rtbasename) \ +{ \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \ + .waiters = RB_ROOT_CACHED, \ + .owner = NULL \ +} + +/** + * rt_mutex_base_is_locked - is the rtmutex locked + * @lock: the mutex to be queried + * + * Returns true if the mutex is locked, false if unlocked. + */ +static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock) +{ + return READ_ONCE(lock->owner) != NULL; +} + +extern void rt_mutex_base_init(struct rt_mutex_base *rtb); + /** * The rt_mutex structure * @@ -28,9 +55,7 @@ extern int max_lock_depth; /* for sysctl */ * @owner: the mutex owner */ struct rt_mutex { - raw_spinlock_t wait_lock; - struct rb_root_cached waiters; - struct task_struct *owner; + struct rt_mutex_base rtmutex; #ifdef CONFIG_DEBUG_LOCK_ALLOC struct lockdep_map dep_map; #endif @@ -52,32 +77,24 @@ do { \ } while (0) #ifdef CONFIG_DEBUG_LOCK_ALLOC -#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ - , .dep_map = { .name = #mutexname } +#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ + .dep_map = { \ + .name = #mutexname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + } #else #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) #endif -#define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .waiters = RB_ROOT_CACHED \ - , .owner = NULL \ - __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} +#define __RT_MUTEX_INITIALIZER(mutexname) \ +{ \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \ + __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ +} #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) -/** - * rt_mutex_is_locked - is the mutex locked - * @lock: the mutex to be queried - * - * Returns 1 if the mutex is locked, 0 if unlocked. 
- */ -static inline int rt_mutex_is_locked(struct rt_mutex *lock) -{ - return lock->owner != NULL; -} - extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/include/linux/rwbase_rt.h b/include/linux/rwbase_rt.h new file mode 100644 index 000000000000..1d264dd08625 --- /dev/null +++ b/include/linux/rwbase_rt.h @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef _LINUX_RWBASE_RT_H +#define _LINUX_RWBASE_RT_H + +#include <linux/rtmutex.h> +#include <linux/atomic.h> + +#define READER_BIAS (1U << 31) +#define WRITER_BIAS (1U << 30) + +struct rwbase_rt { + atomic_t readers; + struct rt_mutex_base rtmutex; +}; + +#define __RWBASE_INITIALIZER(name) \ +{ \ + .readers = ATOMIC_INIT(READER_BIAS), \ + .rtmutex = __RT_MUTEX_BASE_INITIALIZER(name.rtmutex), \ +} + +#define init_rwbase_rt(rwbase) \ + do { \ + rt_mutex_base_init(&(rwbase)->rtmutex); \ + atomic_set(&(rwbase)->readers, READER_BIAS); \ + } while (0) + + +static __always_inline bool rw_base_is_locked(struct rwbase_rt *rwb) +{ + return atomic_read(&rwb->readers) != READER_BIAS; +} + +static __always_inline bool rw_base_is_contended(struct rwbase_rt *rwb) +{ + return atomic_read(&rwb->readers) > 0; +} + +#endif /* _LINUX_RWBASE_RT_H */ diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h new file mode 100644 index 000000000000..49c1f3842ed5 --- /dev/null +++ b/include/linux/rwlock_rt.h @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef __LINUX_RWLOCK_RT_H +#define __LINUX_RWLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_RT_H +#error Do not #include directly. Use <linux/spinlock.h>. +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rt_rwlock_init(rwlock_t *rwlock, const char *name, + struct lock_class_key *key); +#else +static inline void __rt_rwlock_init(rwlock_t *rwlock, char *name, + struct lock_class_key *key) +{ +} +#endif + +#define rwlock_init(rwl) \ +do { \ + static struct lock_class_key __key; \ + \ + init_rwbase_rt(&(rwl)->rwbase); \ + __rt_rwlock_init(rwl, #rwl, &__key); \ +} while (0) + +extern void rt_read_lock(rwlock_t *rwlock); +extern int rt_read_trylock(rwlock_t *rwlock); +extern void rt_read_unlock(rwlock_t *rwlock); +extern void rt_write_lock(rwlock_t *rwlock); +extern int rt_write_trylock(rwlock_t *rwlock); +extern void rt_write_unlock(rwlock_t *rwlock); + +static __always_inline void read_lock(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); +} + +static __always_inline void read_lock_bh(rwlock_t *rwlock) +{ + local_bh_disable(); + rt_read_lock(rwlock); +} + +static __always_inline void read_lock_irq(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); +} + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_read_lock(lock); \ + flags = 0; \ + } while (0) + +#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) + +static __always_inline void read_unlock(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void read_unlock_bh(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); + local_bh_enable(); +} + +static __always_inline void read_unlock_irq(rwlock_t *rwlock) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void read_unlock_irqrestore(rwlock_t *rwlock, + unsigned long flags) +{ + rt_read_unlock(rwlock); +} + +static __always_inline void write_lock(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); +} + +static __always_inline void write_lock_bh(rwlock_t *rwlock) +{ + local_bh_disable(); + rt_write_lock(rwlock); +} + +static 
__always_inline void write_lock_irq(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); +} + +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + rt_write_lock(lock); \ + flags = 0; \ + } while (0) + +#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) + +#define write_trylock_irqsave(lock, flags) \ +({ \ + int __locked; \ + \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __locked = write_trylock(lock); \ + __locked; \ +}) + +static __always_inline void write_unlock(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); +} + +static __always_inline void write_unlock_bh(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); + local_bh_enable(); +} + +static __always_inline void write_unlock_irq(rwlock_t *rwlock) +{ + rt_write_unlock(rwlock); +} + +static __always_inline void write_unlock_irqrestore(rwlock_t *rwlock, + unsigned long flags) +{ + rt_write_unlock(rwlock); +} + +#define rwlock_is_contended(lock) (((void)(lock), 0)) + +#endif /* __LINUX_RWLOCK_RT_H */ diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index 3bd03e18061c..1948442e7750 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,9 +1,23 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H +#if !defined(__LINUX_SPINLOCK_TYPES_H) +# error "Do not include directly, include spinlock_types.h" +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT /* - * include/linux/rwlock_types.h - generic rwlock type definitions - * and initializers + * generic rwlock type definitions and initializers * * portions Copyright 2005, Red Hat, Inc., Ingo Molnar * Released under the General Public License (GPL). 
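On PREEMPT_RT the rwlock API keeps its shape, but as the macros above show, read_lock_irqsave()/write_lock_irqsave() do not actually disable interrupts and simply store 0 into flags. Caller code stays source compatible; a minimal sketch:

	static DEFINE_RWLOCK(demo_rwlock);

	unsigned long flags;

	read_lock_irqsave(&demo_rwlock, flags);
	/* reader section; on RT acquisition may block, since an rtmutex backs the lock */
	read_unlock_irqrestore(&demo_rwlock, flags);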
@@ -21,16 +35,6 @@ typedef struct { #define RWLOCK_MAGIC 0xdeaf1eed -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RW_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_CONFIG, \ - } -#else -# define RW_DEP_MAP_INIT(lockname) -#endif - #ifdef CONFIG_DEBUG_SPINLOCK #define __RW_LOCK_UNLOCKED(lockname) \ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \ @@ -46,4 +50,29 @@ typedef struct { #define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) +#else /* !CONFIG_PREEMPT_RT */ + +#include <linux/rwbase_rt.h> + +typedef struct { + struct rwbase_rt rwbase; + atomic_t readers; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#define __RWLOCK_RT_INITIALIZER(name) \ +{ \ + .rwbase = __RWBASE_INITIALIZER(name), \ + RW_DEP_MAP_INIT(name) \ +} + +#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name) + +#define DEFINE_RWLOCK(name) \ + rwlock_t name = __RW_LOCK_UNLOCKED(name) + +#endif /* CONFIG_PREEMPT_RT */ + #endif /* __LINUX_RWLOCK_TYPES_H */ diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index a66038d88878..352c6127cb90 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -16,6 +16,19 @@ #include <linux/spinlock.h> #include <linux/atomic.h> #include <linux/err.h> + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __RWSEM_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SLEEP, \ + }, +#else +# define __RWSEM_DEP_MAP_INIT(lockname) +#endif + +#ifndef CONFIG_PREEMPT_RT + #ifdef CONFIG_RWSEM_SPIN_ON_OWNER #include <linux/osq_lock.h> #endif @@ -64,16 +77,6 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem) /* Common initializer macros and functions */ -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __RWSEM_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SLEEP, \ - }, -#else -# define __RWSEM_DEP_MAP_INIT(lockname) -#endif - #ifdef CONFIG_DEBUG_RWSEMS # define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname, #else @@ -119,6 +122,53 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) return !list_empty(&sem->wait_list); } +#else /* !CONFIG_PREEMPT_RT */ + +#include <linux/rwbase_rt.h> + +struct rw_semaphore { + struct rwbase_rt rwbase; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __RWSEM_INITIALIZER(name) \ + { \ + .rwbase = __RWBASE_INITIALIZER(name), \ + __RWSEM_DEP_MAP_INIT(name) \ + } + +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + +extern void __init_rwsem(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key); + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + +static __always_inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return rw_base_is_locked(&sem->rwbase); +} + +static __always_inline int rwsem_is_contended(struct rw_semaphore *sem) +{ + return rw_base_is_contended(&sem->rwbase); +} + +#endif /* CONFIG_PREEMPT_RT */ + +/* + * The functions below are the same for all rwsem implementations including + * the RT specific variant. 
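The rw_semaphore API is likewise unchanged for callers; only the backing type differs under PREEMPT_RT, with rwsem_is_locked()/rwsem_is_contended() built on the rwbase_rt helpers. A minimal usage sketch:

	static DECLARE_RWSEM(demo_rwsem);

	down_read(&demo_rwsem);
	WARN_ON(!rwsem_is_locked(&demo_rwsem));
	up_read(&demo_rwsem);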
+ */ + /* * lock for reading */ diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index ecf87484814f..266754a55327 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -39,6 +39,12 @@ struct sg_table { unsigned int orig_nents; /* original size of list */ }; +struct sg_append_table { + struct sg_table sgt; /* The scatter list table */ + struct scatterlist *prv; /* last populated sge in the table */ + unsigned int total_nents; /* Total entries in the table */ +}; + /* * Notes on SG table design. * @@ -280,19 +286,51 @@ typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); typedef void (sg_free_fn)(struct scatterlist *, unsigned int); void __sg_free_table(struct sg_table *, unsigned int, unsigned int, - sg_free_fn *); + sg_free_fn *, unsigned int); void sg_free_table(struct sg_table *); +void sg_free_append_table(struct sg_append_table *sgt); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); -struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, - struct page **pages, unsigned int n_pages, unsigned int offset, - unsigned long size, unsigned int max_segment, - struct scatterlist *prv, unsigned int left_pages, - gfp_t gfp_mask); -int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, - unsigned int n_pages, unsigned int offset, - unsigned long size, gfp_t gfp_mask); +int sg_alloc_append_table_from_pages(struct sg_append_table *sgt, + struct page **pages, unsigned int n_pages, + unsigned int offset, unsigned long size, + unsigned int max_segment, + unsigned int left_pages, gfp_t gfp_mask); +int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, + unsigned int n_pages, unsigned int offset, + unsigned long size, + unsigned int max_segment, gfp_t gfp_mask); + +/** + * sg_alloc_table_from_pages - Allocate and initialize an sg table from + * an array of pages + * @sgt: The sg table header to use + * @pages: Pointer to an array of page pointers + * @n_pages: Number of pages in the pages array + * @offset: Offset from start of the first page to the start of a buffer + * @size: Number of valid bytes in the buffer (after offset) + * @gfp_mask: GFP allocation mask + * + * Description: + * Allocate and initialize an sg table from a list of pages. Contiguous + * ranges of the pages are squashed into a single scatterlist node. A user + * may provide an offset at a start and a size of valid data in a buffer + * specified by the page array. The returned sg table is released by + * sg_free_table. 
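The new sg_append_table wraps an sg_table plus the bookkeeping needed to add pages in batches. A hedged single-batch sketch (pages/n_pages are assumed to come from the caller, e.g. a prior page-pinning step; UINT_MAX means no segment-size cap, matching the sg_alloc_table_from_pages() wrapper):

	struct sg_append_table append = {};
	int ret;

	ret = sg_alloc_append_table_from_pages(&append, pages, n_pages, 0,
					       (unsigned long)n_pages << PAGE_SHIFT,
					       UINT_MAX, 0 /* left_pages */,
					       GFP_KERNEL);
	if (ret)
		return ret;

	/* append.sgt can now be used like any other struct sg_table */

	sg_free_append_table(&append);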
+ * + * Returns: + * 0 on success, negative error on failure + */ +static inline int sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, + unsigned int n_pages, + unsigned int offset, + unsigned long size, gfp_t gfp_mask) +{ + return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset, + size, UINT_MAX, gfp_mask); +} #ifdef CONFIG_SGL_ALLOC struct scatterlist *sgl_alloc_order(unsigned long long length, diff --git a/include/linux/sched.h b/include/linux/sched.h index ec8d07d88641..e12b524426b0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -42,6 +42,7 @@ struct backing_dev_info; struct bio_list; struct blk_plug; struct bpf_local_storage; +struct bpf_run_ctx; struct capture_control; struct cfs_rq; struct fs_struct; @@ -95,7 +96,9 @@ struct task_group; #define TASK_WAKING 0x0200 #define TASK_NOLOAD 0x0400 #define TASK_NEW 0x0800 -#define TASK_STATE_MAX 0x1000 +/* RT specific auxilliary flag to mark RT lock waiters */ +#define TASK_RTLOCK_WAIT 0x1000 +#define TASK_STATE_MAX 0x2000 /* Convenience macros for the sake of set_current_state: */ #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) @@ -121,8 +124,6 @@ struct task_group; #define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0) -#ifdef CONFIG_DEBUG_ATOMIC_SLEEP - /* * Special states are those that do not use the normal wait-loop pattern. See * the comment with set_special_state(). @@ -130,30 +131,37 @@ struct task_group; #define is_special_task_state(state) \ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) -#define __set_current_state(state_value) \ - do { \ - WARN_ON_ONCE(is_special_task_state(state_value));\ - current->task_state_change = _THIS_IP_; \ - WRITE_ONCE(current->__state, (state_value)); \ - } while (0) - -#define set_current_state(state_value) \ - do { \ - WARN_ON_ONCE(is_special_task_state(state_value));\ - current->task_state_change = _THIS_IP_; \ - smp_store_mb(current->__state, (state_value)); \ +#ifdef CONFIG_DEBUG_ATOMIC_SLEEP +# define debug_normal_state_change(state_value) \ + do { \ + WARN_ON_ONCE(is_special_task_state(state_value)); \ + current->task_state_change = _THIS_IP_; \ } while (0) -#define set_special_state(state_value) \ +# define debug_special_state_change(state_value) \ do { \ - unsigned long flags; /* may shadow */ \ WARN_ON_ONCE(!is_special_task_state(state_value)); \ - raw_spin_lock_irqsave(¤t->pi_lock, flags); \ current->task_state_change = _THIS_IP_; \ - WRITE_ONCE(current->__state, (state_value)); \ - raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ } while (0) + +# define debug_rtlock_wait_set_state() \ + do { \ + current->saved_state_change = current->task_state_change;\ + current->task_state_change = _THIS_IP_; \ + } while (0) + +# define debug_rtlock_wait_restore_state() \ + do { \ + current->task_state_change = current->saved_state_change;\ + } while (0) + #else +# define debug_normal_state_change(cond) do { } while (0) +# define debug_special_state_change(cond) do { } while (0) +# define debug_rtlock_wait_set_state() do { } while (0) +# define debug_rtlock_wait_restore_state() do { } while (0) +#endif + /* * set_current_state() includes a barrier so that the write of current->state * is correctly serialised wrt the caller's subsequent test of whether to @@ -192,26 +200,77 @@ struct task_group; * Also see the comments of try_to_wake_up(). 
*/ #define __set_current_state(state_value) \ - WRITE_ONCE(current->__state, (state_value)) + do { \ + debug_normal_state_change((state_value)); \ + WRITE_ONCE(current->__state, (state_value)); \ + } while (0) #define set_current_state(state_value) \ - smp_store_mb(current->__state, (state_value)) + do { \ + debug_normal_state_change((state_value)); \ + smp_store_mb(current->__state, (state_value)); \ + } while (0) /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. In that case we must - * serialize against wakeups such that any possible in-flight TASK_RUNNING stores - * will not collide with our state change. + * serialize against wakeups such that any possible in-flight TASK_RUNNING + * stores will not collide with our state change. */ #define set_special_state(state_value) \ do { \ unsigned long flags; /* may shadow */ \ + \ raw_spin_lock_irqsave(¤t->pi_lock, flags); \ + debug_special_state_change((state_value)); \ WRITE_ONCE(current->__state, (state_value)); \ raw_spin_unlock_irqrestore(¤t->pi_lock, flags); \ } while (0) -#endif +/* + * PREEMPT_RT specific variants for "sleeping" spin/rwlocks + * + * RT's spin/rwlock substitutions are state preserving. The state of the + * task when blocking on the lock is saved in task_struct::saved_state and + * restored after the lock has been acquired. These operations are + * serialized by task_struct::pi_lock against try_to_wake_up(). Any non RT + * lock related wakeups while the task is blocked on the lock are + * redirected to operate on task_struct::saved_state to ensure that these + * are not dropped. On restore task_struct::saved_state is set to + * TASK_RUNNING so any wakeup attempt redirected to saved_state will fail. + * + * The lock operation looks like this: + * + * current_save_and_set_rtlock_wait_state(); + * for (;;) { + * if (try_lock()) + * break; + * raw_spin_unlock_irq(&lock->wait_lock); + * schedule_rtlock(); + * raw_spin_lock_irq(&lock->wait_lock); + * set_current_state(TASK_RTLOCK_WAIT); + * } + * current_restore_rtlock_saved_state(); + */ +#define current_save_and_set_rtlock_wait_state() \ + do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_lock(¤t->pi_lock); \ + current->saved_state = current->__state; \ + debug_rtlock_wait_set_state(); \ + WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \ + raw_spin_unlock(¤t->pi_lock); \ + } while (0); + +#define current_restore_rtlock_saved_state() \ + do { \ + lockdep_assert_irqs_disabled(); \ + raw_spin_lock(¤t->pi_lock); \ + debug_rtlock_wait_restore_state(); \ + WRITE_ONCE(current->__state, current->saved_state); \ + current->saved_state = TASK_RUNNING; \ + raw_spin_unlock(¤t->pi_lock); \ + } while (0); #define get_current_state() READ_ONCE(current->__state) @@ -230,6 +289,9 @@ extern long schedule_timeout_idle(long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); asmlinkage void preempt_schedule_irq(void); +#ifdef CONFIG_PREEMPT_RT + extern void schedule_rtlock(void); +#endif extern int __must_check io_schedule_prepare(void); extern void io_schedule_finish(int token); @@ -668,6 +730,11 @@ struct task_struct { #endif unsigned int __state; +#ifdef CONFIG_PREEMPT_RT + /* saved state for "spinlock sleepers" */ + unsigned int saved_state; +#endif + /* * This begins the randomizable portion of task_struct. Only * scheduling-critical items should be added above here. 
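For reference, the rewritten __set_current_state()/set_current_state() above still serve the canonical wait loop; the condition 'done' is a placeholder:

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (done)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

The PREEMPT_RT save/restore helpers exist precisely so the "sleeping" spinlock substitutions can run such a loop with TASK_RTLOCK_WAIT without destroying the state the task was already in.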
@@ -748,6 +815,7 @@ struct task_struct { unsigned int policy; int nr_cpus_allowed; const cpumask_t *cpus_ptr; + cpumask_t *user_cpus_ptr; cpumask_t cpus_mask; void *migration_pending; #ifdef CONFIG_SMP @@ -863,6 +931,10 @@ struct task_struct { /* Used by page_owner=on to detect recursion in page tracking. */ unsigned in_page_owner:1; #endif +#ifdef CONFIG_EVENTFD + /* Recursion prevention for eventfd_signal() */ + unsigned in_eventfd_signal:1; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. */ @@ -1357,6 +1429,9 @@ struct task_struct { struct kmap_ctrl kmap_ctrl; #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; +# ifdef CONFIG_PREEMPT_RT + unsigned long saved_state_change; +# endif #endif int pagefault_disabled; #ifdef CONFIG_MMU @@ -1379,6 +1454,8 @@ struct task_struct { #ifdef CONFIG_BPF_SYSCALL /* Used by BPF task local storage */ struct bpf_local_storage __rcu *bpf_storage; + /* Used for BPF run context */ + struct bpf_run_ctx *bpf_ctx; #endif #ifdef CONFIG_GCC_PLUGIN_STACKLEAK @@ -1400,6 +1477,16 @@ struct task_struct { struct llist_head kretprobe_instances; #endif +#ifdef CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH + /* + * If L1D flush is supported on mm context switch + * then we use this callback head to queue kill work + * to kill tasks that are not running on SMT disabled + * cores + */ + struct callback_head l1d_flush_kill; +#endif + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. @@ -1705,6 +1792,11 @@ extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_ #ifdef CONFIG_SMP extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); +extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node); +extern void release_user_cpus_ptr(struct task_struct *p); +extern int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask); +extern void force_compatible_cpus_allowed_ptr(struct task_struct *p); +extern void relax_compatible_cpus_allowed_ptr(struct task_struct *p); #else static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -1715,6 +1807,21 @@ static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpuma return -EINVAL; return 0; } +static inline int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node) +{ + if (src->user_cpus_ptr) + return -EINVAL; + return 0; +} +static inline void release_user_cpus_ptr(struct task_struct *p) +{ + WARN_ON(p->user_cpus_ptr); +} + +static inline int dl_task_check_affinity(struct task_struct *p, const struct cpumask *mask) +{ + return 0; +} #endif extern int yield_to(struct task_struct *p, bool preempt); diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index e24b1fe348e3..5561486fddef 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -174,13 +174,13 @@ static inline gfp_t current_gfp_context(gfp_t flags) } #ifdef CONFIG_LOCKDEP -extern void __fs_reclaim_acquire(void); -extern void __fs_reclaim_release(void); +extern void __fs_reclaim_acquire(unsigned long ip); +extern void __fs_reclaim_release(unsigned long ip); extern void fs_reclaim_acquire(gfp_t gfp_mask); extern void fs_reclaim_release(gfp_t gfp_mask); #else -static inline void __fs_reclaim_acquire(void) { } -static inline void __fs_reclaim_release(void) { } +static inline void 
__fs_reclaim_acquire(unsigned long ip) { } +static inline void __fs_reclaim_release(unsigned long ip) { } static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } static inline void fs_reclaim_release(gfp_t gfp_mask) { } #endif @@ -306,7 +306,7 @@ set_active_memcg(struct mem_cgroup *memcg) { struct mem_cgroup *old; - if (in_interrupt()) { + if (!in_task()) { old = this_cpu_read(int_active_memcg); this_cpu_write(int_active_memcg, memcg); } else { diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index b9126fe06c3f..e5f4ce622ee6 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -298,11 +298,6 @@ static inline void kernel_signal_stop(void) schedule(); } -#ifdef __ARCH_SI_TRAPNO -# define ___ARCH_SI_TRAPNO(_a1) , _a1 -#else -# define ___ARCH_SI_TRAPNO(_a1) -#endif #ifdef __ia64__ # define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3 #else @@ -310,14 +305,11 @@ static inline void kernel_signal_stop(void) #endif int force_sig_fault_to_task(int sig, int code, void __user *addr - ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); int force_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)); int send_sig_fault(int sig, int code, void __user *addr - ___ARCH_SI_TRAPNO(int trapno) ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) , struct task_struct *t); @@ -329,6 +321,10 @@ int force_sig_pkuerr(void __user *addr, u32 pkey); int force_sig_perf(void __user *addr, u32 type, u64 sig_data); int force_sig_ptrace_errno_trap(int errno, void __user *addr); +int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno); +int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno, + struct task_struct *t); +int force_sig_seccomp(int syscall, int reason, bool force_coredump); extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *); extern void force_sigsegv(int sig); @@ -714,6 +710,12 @@ static inline void unlock_task_sighand(struct task_struct *task, spin_unlock_irqrestore(&task->sighand->siglock, *flags); } +#ifdef CONFIG_LOCKDEP +extern void lockdep_assert_task_sighand_held(struct task_struct *task); +#else +static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { } +#endif + static inline unsigned long task_rlimit(const struct task_struct *task, unsigned int limit) { diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index db2c0f34aaaf..304f431178fd 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h @@ -28,30 +28,12 @@ enum { sysctl_hung_task_timeout_secs = 0 }; extern unsigned int sysctl_sched_child_runs_first; -extern unsigned int sysctl_sched_latency; -extern unsigned int sysctl_sched_min_granularity; -extern unsigned int sysctl_sched_wakeup_granularity; - enum sched_tunable_scaling { SCHED_TUNABLESCALING_NONE, SCHED_TUNABLESCALING_LOG, SCHED_TUNABLESCALING_LINEAR, SCHED_TUNABLESCALING_END, }; -extern unsigned int sysctl_sched_tunable_scaling; - -extern unsigned int sysctl_numa_balancing_scan_delay; -extern unsigned int sysctl_numa_balancing_scan_period_min; -extern unsigned int sysctl_numa_balancing_scan_period_max; -extern unsigned int sysctl_numa_balancing_scan_size; - -#ifdef CONFIG_SCHED_DEBUG -extern __read_mostly unsigned int sysctl_sched_migration_cost; -extern __read_mostly unsigned int sysctl_sched_nr_migrate; - -extern int 
sysctl_resched_latency_warn_ms; -extern int sysctl_resched_latency_warn_once; -#endif /* * control realtime throttling: diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 2462f7d07695..00ed419dd464 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h @@ -4,6 +4,7 @@ #include <linux/uidgid.h> #include <linux/atomic.h> +#include <linux/percpu_counter.h> #include <linux/refcount.h> #include <linux/ratelimit.h> @@ -13,7 +14,7 @@ struct user_struct { refcount_t __count; /* reference count */ #ifdef CONFIG_EPOLL - atomic_long_t epoll_watches; /* The number of file descriptors currently watched */ + struct percpu_counter epoll_watches; /* The number of file descriptors currently watched */ #endif unsigned long unix_inflight; /* How many files in flight in unix sockets */ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */ diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 26a2013ac39c..06cd8fb2f409 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h @@ -42,8 +42,11 @@ struct wake_q_head { #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01) -#define DEFINE_WAKE_Q(name) \ - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } +#define WAKE_Q_HEAD_INITIALIZER(name) \ + { WAKE_Q_TAIL, &name.first } + +#define DEFINE_WAKE_Q(name) \ + struct wake_q_head name = WAKE_Q_HEAD_INITIALIZER(name) static inline void wake_q_init(struct wake_q_head *head) { diff --git a/include/linux/security.h b/include/linux/security.h index 24eda04221e9..5b7288521300 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -120,10 +120,11 @@ enum lockdown_reason { LOCKDOWN_MMIOTRACE, LOCKDOWN_DEBUGFS, LOCKDOWN_XMON_WR, + LOCKDOWN_BPF_WRITE_USER, LOCKDOWN_INTEGRITY_MAX, LOCKDOWN_KCORE, LOCKDOWN_KPROBES, - LOCKDOWN_BPF_READ, + LOCKDOWN_BPF_READ_KERNEL, LOCKDOWN_PERF, LOCKDOWN_TRACEFS, LOCKDOWN_XMON_RW, diff --git a/include/linux/serdev.h b/include/linux/serdev.h index 9f14f9c12ec4..3368c261ab62 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -327,4 +327,18 @@ static inline int serdev_tty_port_unregister(struct tty_port *port) } #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ +struct acpi_resource; +struct acpi_resource_uart_serialbus; + +#ifdef CONFIG_ACPI +bool serdev_acpi_get_uart_resource(struct acpi_resource *ares, + struct acpi_resource_uart_serialbus **uart); +#else +static inline bool serdev_acpi_get_uart_resource(struct acpi_resource *ares, + struct acpi_resource_uart_serialbus **uart) +{ + return false; +} +#endif /* CONFIG_ACPI */ + #endif /*_LINUX_SERDEV_H */ diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 52d7fb92a69d..c58cc142d23f 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -518,6 +518,25 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) if (sysrq_ch) handle_sysrq(sysrq_ch); } + +static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, + unsigned long flags) +{ + int sysrq_ch; + + if (!port->has_sysrq) { + spin_unlock_irqrestore(&port->lock, flags); + return; + } + + sysrq_ch = port->sysrq_ch; + port->sysrq_ch = 0; + + spin_unlock_irqrestore(&port->lock, flags); + + if (sysrq_ch) + handle_sysrq(sysrq_ch); +} #else /* CONFIG_MAGIC_SYSRQ_SERIAL */ static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { @@ -531,6 +550,11 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port) { spin_unlock(&port->lock); } +static 
inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port, + unsigned long flags) +{ + spin_unlock_irqrestore(&port->lock, flags); +} #endif /* CONFIG_MAGIC_SYSRQ_SERIAL */ /* diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h index f6c3323fc4c5..cf0de4a86640 100644 --- a/include/linux/serial_s3c.h +++ b/include/linux/serial_s3c.h @@ -27,6 +27,15 @@ #define S3C2410_UERSTAT (0x14) #define S3C2410_UFSTAT (0x18) #define S3C2410_UMSTAT (0x1C) +#define USI_CON (0xC4) +#define USI_OPTION (0xC8) + +#define USI_CON_RESET (1<<0) +#define USI_CON_RESET_MASK (1<<0) + +#define USI_OPTION_HWACG_CLKREQ_ON (1<<1) +#define USI_OPTION_HWACG_CLKSTOP_ON (1<<2) +#define USI_OPTION_HWACG_MASK (3<<1) #define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3)) diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 8e775ce517bb..166158b6e917 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -18,6 +18,7 @@ struct shmem_inode_info { unsigned long flags; unsigned long alloced; /* data pages alloced to file */ unsigned long swapped; /* subtotal assigned to swap */ + pgoff_t fallocend; /* highest fallocate endindex */ struct list_head shrinklist; /* shrinkable hpage inodes */ struct list_head swaplist; /* chain of maybes on swap */ struct shared_policy policy; /* NUMA memory alloc policy */ @@ -31,7 +32,7 @@ struct shmem_sb_info { struct percpu_counter used_blocks; /* How many are allocated */ unsigned long max_inodes; /* How many inodes are allowed */ unsigned long free_inodes; /* How many are left for allocation */ - spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ + raw_spinlock_t stat_lock; /* Serialize shmem_sb_info changes */ umode_t mode; /* Mount mode for root directory */ unsigned char huge; /* Whether to try for hugepages */ kuid_t uid; /* Mount uid for root directory */ @@ -85,7 +86,12 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); extern int shmem_unuse(unsigned int type, bool frontswap, unsigned long *fs_pages_to_unuse); -extern bool shmem_huge_enabled(struct vm_area_struct *vma); +extern bool shmem_is_huge(struct vm_area_struct *vma, + struct inode *inode, pgoff_t index); +static inline bool shmem_huge_enabled(struct vm_area_struct *vma) +{ + return shmem_is_huge(vma, file_inode(vma->vm_file), vma->vm_pgoff); +} extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, pgoff_t start, pgoff_t end); @@ -93,9 +99,8 @@ extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, /* Flag allocation requirements to shmem_getpage */ enum sgp_type { SGP_READ, /* don't exceed i_size, don't allocate page */ + SGP_NOALLOC, /* similar, but fail on hole or use fallocated page */ SGP_CACHE, /* don't exceed i_size, may allocate page */ - SGP_NOHUGE, /* like SGP_CACHE, but no huge pages */ - SGP_HUGE, /* like SGP_CACHE, huge pages preferred */ SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */ SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ }; @@ -119,6 +124,18 @@ static inline bool shmem_file(struct file *file) return shmem_mapping(file->f_mapping); } +/* + * If fallocate(FALLOC_FL_KEEP_SIZE) has been used, there may be pages + * beyond i_size's notion of EOF, which fallocate has committed to reserving: + * which split_huge_page() must therefore not delete. 
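The new _irqrestore flavour above mirrors uart_unlock_and_check_sysrq() for drivers whose interrupt handlers take port->lock with spin_lock_irqsave(). A hedged sketch of the intended pattern (the receive handling in the middle is illustrative only):

	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	/* RX path runs here, e.g. feeding characters to uart_prepare_sysrq_char() */
	uart_unlock_and_check_sysrq_irqrestore(port, flags);

Any deferred sysrq character is then handled after the lock has been dropped and interrupts restored.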
This use of a single + * "fallocend" per inode errs on the side of not deleting a reservation when + * in doubt: there are plenty of cases when it preserves unreserved pages. + */ +static inline pgoff_t shmem_fallocend(struct inode *inode, pgoff_t eof) +{ + return max(eof, SHMEM_I(inode)->fallocend); +} + extern bool shmem_charge(struct inode *inode, long pages); extern void shmem_uncharge(struct inode *inode, long pages); diff --git a/include/linux/signal.h b/include/linux/signal.h index 3454c7ff0778..3f96a6374e4f 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -44,7 +44,7 @@ enum siginfo_layout { SIL_FAULT_MCEERR, SIL_FAULT_BNDERR, SIL_FAULT_PKUERR, - SIL_PERF_EVENT, + SIL_FAULT_PERF_EVENT, SIL_CHLD, SIL_RT, SIL_SYS, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b2db9cd9a73f..6bdb0db3e825 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -689,6 +689,7 @@ typedef unsigned char *sk_buff_data_t; * CHECKSUM_UNNECESSARY (max 3) * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB + * @slow_gro: state present at GRO time, slower prepare step required * @napi_id: id of the NAPI struct this skb came from * @sender_cpu: (aka @napi_id) source CPU in XPS * @secmark: security marking @@ -863,13 +864,14 @@ struct sk_buff { __u8 tc_skip_classify:1; __u8 tc_at_ingress:1; #endif -#ifdef CONFIG_NET_REDIRECT __u8 redirected:1; +#ifdef CONFIG_NET_REDIRECT __u8 from_ingress:1; #endif #ifdef CONFIG_TLS_DEVICE __u8 decrypted:1; #endif + __u8 slow_gro:1; #ifdef CONFIG_NET_SCHED __u16 tc_index; /* traffic control index */ @@ -990,6 +992,7 @@ static inline struct dst_entry *skb_dst(const struct sk_buff *skb) */ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) { + skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst; } @@ -1006,6 +1009,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) { WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); + skb->slow_gro |= !!dst; skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; } @@ -1179,6 +1183,7 @@ static inline struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, gfp_t gfp_mask); struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom); +struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom); struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, gfp_t priority); int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, @@ -4216,6 +4221,7 @@ static inline unsigned long skb_get_nfct(const struct sk_buff *skb) static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct) { #if IS_ENABLED(CONFIG_NF_CONNTRACK) + skb->slow_gro |= !!nfct; skb->_nfct = nfct; #endif } @@ -4375,6 +4381,7 @@ static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src) #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) nf_conntrack_put(skb_nfct(dst)); #endif + dst->slow_gro = src->slow_gro; __nf_copy(dst, src, true); } @@ -4664,17 +4671,13 @@ static inline __wsum lco_csum(struct sk_buff *skb) static inline bool skb_is_redirected(const struct sk_buff *skb) { -#ifdef CONFIG_NET_REDIRECT return skb->redirected; -#else - return false; -#endif } static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) { -#ifdef 
CONFIG_NET_REDIRECT skb->redirected = 1; +#ifdef CONFIG_NET_REDIRECT skb->from_ingress = from_ingress; if (skb->from_ingress) skb->tstamp = 0; @@ -4683,9 +4686,7 @@ static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress) static inline void skb_reset_redirect(struct sk_buff *skb) { -#ifdef CONFIG_NET_REDIRECT skb->redirected = 0; -#endif } static inline bool skb_csum_is_sctp(struct sk_buff *skb) @@ -4711,11 +4712,9 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb) } #ifdef CONFIG_PAGE_POOL -static inline void skb_mark_for_recycle(struct sk_buff *skb, struct page *page, - struct page_pool *pp) +static inline void skb_mark_for_recycle(struct sk_buff *skb) { skb->pp_recycle = 1; - page_pool_store_mem_info(page, pp); } #endif diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 96f319099744..14ab0c0bc924 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -285,11 +285,45 @@ static inline struct sk_psock *sk_psock(const struct sock *sk) return rcu_dereference_sk_user_data(sk); } +static inline void sk_psock_set_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + set_bit(bit, &psock->state); +} + +static inline void sk_psock_clear_state(struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + clear_bit(bit, &psock->state); +} + +static inline bool sk_psock_test_state(const struct sk_psock *psock, + enum sk_psock_state_bits bit) +{ + return test_bit(bit, &psock->state); +} + +static inline void sock_drop(struct sock *sk, struct sk_buff *skb) +{ + sk_drops_add(sk, skb); + kfree_skb(skb); +} + +static inline void drop_sk_msg(struct sk_psock *psock, struct sk_msg *msg) +{ + if (msg->skb) + sock_drop(psock->sk, msg->skb); + kfree(msg); +} + static inline void sk_psock_queue_msg(struct sk_psock *psock, struct sk_msg *msg) { spin_lock_bh(&psock->ingress_lock); - list_add_tail(&msg->list, &psock->ingress_msg); + if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) + list_add_tail(&msg->list, &psock->ingress_msg); + else + drop_sk_msg(psock, msg); spin_unlock_bh(&psock->ingress_lock); } @@ -406,24 +440,6 @@ static inline void sk_psock_restore_proto(struct sock *sk, psock->psock_update_sk_prot(sk, psock, true); } -static inline void sk_psock_set_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - set_bit(bit, &psock->state); -} - -static inline void sk_psock_clear_state(struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - clear_bit(bit, &psock->state); -} - -static inline bool sk_psock_test_state(const struct sk_psock *psock, - enum sk_psock_state_bits bit) -{ - return test_bit(bit, &psock->state); -} - static inline struct sk_psock *sk_psock_get(struct sock *sk) { struct sk_psock *psock; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index dcde82a4434c..85499f0586b0 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -10,6 +10,7 @@ #include <linux/kfence.h> #include <linux/kobject.h> #include <linux/reciprocal_div.h> +#include <linux/local_lock.h> enum stat_item { ALLOC_FASTPATH, /* Allocation from cpu slab */ @@ -40,6 +41,10 @@ enum stat_item { CPU_PARTIAL_DRAIN, /* Drain cpu partial to node partial */ NR_SLUB_STAT_ITEMS }; +/* + * When changing the layout, make sure freelist and tid are still compatible + * with this_cpu_cmpxchg_double() alignment requirements. 
+ */ struct kmem_cache_cpu { void **freelist; /* Pointer to next available object */ unsigned long tid; /* Globally unique transaction id */ @@ -47,6 +52,7 @@ struct kmem_cache_cpu { #ifdef CONFIG_SLUB_CPU_PARTIAL struct page *partial; /* Partially allocated frozen slabs */ #endif + local_lock_t lock; /* Protects the fields above */ #ifdef CONFIG_SLUB_STATS unsigned stat[NR_SLUB_STAT_ITEMS]; #endif diff --git a/include/linux/soc/marvell/octeontx2/asm.h b/include/linux/soc/marvell/octeontx2/asm.h index 28c04d918f0f..fa1d6af0164e 100644 --- a/include/linux/soc/marvell/octeontx2/asm.h +++ b/include/linux/soc/marvell/octeontx2/asm.h @@ -22,12 +22,17 @@ : [rs]"r" (ioaddr)); \ (result); \ }) +/* + * STEORL store to memory with release semantics. + * This will avoid using DMB barrier after each LMTST + * operation. + */ #define cn10k_lmt_flush(val, addr) \ ({ \ __asm__ volatile(".cpu generic+lse\n" \ - "steor %x[rf],[%[rs]]" \ - : [rf]"+r"(val) \ - : [rs]"r"(addr)); \ + "steorl %x[rf],[%[rs]]" \ + : [rf] "+r"(val) \ + : [rs] "r"(addr)); \ }) #else #define otx2_lmt_flush(ioaddr) ({ 0; }) diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h index f2645ec52520..60e66fc9b6bf 100644 --- a/include/linux/soc/qcom/smd-rpm.h +++ b/include/linux/soc/qcom/smd-rpm.h @@ -29,6 +29,7 @@ struct qcom_smd_rpm; #define QCOM_SMD_RPM_NCPB 0x6270636E #define QCOM_SMD_RPM_OCMEM_PWR 0x706d636f #define QCOM_SMD_RPM_QPIC_CLK 0x63697071 +#define QCOM_SMD_RPM_QUP_CLK 0x707571 #define QCOM_SMD_RPM_SMPA 0x61706d73 #define QCOM_SMD_RPM_SMPB 0x62706d73 #define QCOM_SMD_RPM_SPDM 0x63707362 diff --git a/include/linux/socket.h b/include/linux/socket.h index 0d8e3dcb7f88..041d6032a348 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -223,8 +223,11 @@ struct ucred { * reuses AF_INET address family */ #define AF_XDP 44 /* XDP sockets */ +#define AF_MCTP 45 /* Management component + * transport protocol + */ -#define AF_MAX 45 /* For now.. */ +#define AF_MAX 46 /* For now.. */ /* Protocol families, same as address families. */ #define PF_UNSPEC AF_UNSPEC @@ -274,6 +277,7 @@ struct ucred { #define PF_QIPCRTR AF_QIPCRTR #define PF_SMC AF_SMC #define PF_XDP AF_XDP +#define PF_MCTP AF_MCTP #define PF_MAX AF_MAX /* Maximum queue length specifiable by listen. 
*/ @@ -421,6 +425,9 @@ extern int __sys_accept4_file(struct file *file, unsigned file_flags, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags, unsigned long nofile); +extern struct file *do_accept(struct file *file, unsigned file_flags, + struct sockaddr __user *upeer_sockaddr, + int __user *upeer_addrlen, int flags); extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr, int __user *upeer_addrlen, int flags); extern int __sys_socket(int family, int type, int protocol); diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index a48ac3e77301..76ce3f3ac0f2 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -661,6 +661,8 @@ struct sdw_slave_ops { * initialized * @first_interrupt_done: status flag tracking if the interrupt handling * for a Slave happens for the first time after enumeration + * @is_mockup_device: status flag used to squelch errors in the command/control + * protocol for SoundWire mockup devices */ struct sdw_slave { struct sdw_slave_id id; @@ -683,6 +685,7 @@ struct sdw_slave { struct completion initialization_complete; u32 unattach_request; bool first_interrupt_done; + bool is_mockup_device; }; #define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index 1ebea7764011..8a463b8fc12a 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -7,6 +7,85 @@ #include <linux/irqreturn.h> #include <linux/soundwire/sdw.h> +#define SDW_SHIM_BASE 0x2C000 +#define SDW_ALH_BASE 0x2C800 +#define SDW_LINK_BASE 0x30000 +#define SDW_LINK_SIZE 0x10000 + +/* Intel SHIM Registers Definition */ +#define SDW_SHIM_LCAP 0x0 +#define SDW_SHIM_LCTL 0x4 +#define SDW_SHIM_IPPTR 0x8 +#define SDW_SHIM_SYNC 0xC + +#define SDW_SHIM_CTLSCAP(x) (0x010 + 0x60 * (x)) +#define SDW_SHIM_CTLS0CM(x) (0x012 + 0x60 * (x)) +#define SDW_SHIM_CTLS1CM(x) (0x014 + 0x60 * (x)) +#define SDW_SHIM_CTLS2CM(x) (0x016 + 0x60 * (x)) +#define SDW_SHIM_CTLS3CM(x) (0x018 + 0x60 * (x)) +#define SDW_SHIM_PCMSCAP(x) (0x020 + 0x60 * (x)) + +#define SDW_SHIM_PCMSYCHM(x, y) (0x022 + (0x60 * (x)) + (0x2 * (y))) +#define SDW_SHIM_PCMSYCHC(x, y) (0x042 + (0x60 * (x)) + (0x2 * (y))) +#define SDW_SHIM_PDMSCAP(x) (0x062 + 0x60 * (x)) +#define SDW_SHIM_IOCTL(x) (0x06C + 0x60 * (x)) +#define SDW_SHIM_CTMCTL(x) (0x06E + 0x60 * (x)) + +#define SDW_SHIM_WAKEEN 0x190 +#define SDW_SHIM_WAKESTS 0x192 + +#define SDW_SHIM_LCTL_SPA BIT(0) +#define SDW_SHIM_LCTL_SPA_MASK GENMASK(3, 0) +#define SDW_SHIM_LCTL_CPA BIT(8) +#define SDW_SHIM_LCTL_CPA_MASK GENMASK(11, 8) + +#define SDW_SHIM_SYNC_SYNCPRD_VAL_24 (24000 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD_VAL_38_4 (38400 / SDW_CADENCE_GSYNC_KHZ - 1) +#define SDW_SHIM_SYNC_SYNCPRD GENMASK(14, 0) +#define SDW_SHIM_SYNC_SYNCCPU BIT(15) +#define SDW_SHIM_SYNC_CMDSYNC_MASK GENMASK(19, 16) +#define SDW_SHIM_SYNC_CMDSYNC BIT(16) +#define SDW_SHIM_SYNC_SYNCGO BIT(24) + +#define SDW_SHIM_PCMSCAP_ISS GENMASK(3, 0) +#define SDW_SHIM_PCMSCAP_OSS GENMASK(7, 4) +#define SDW_SHIM_PCMSCAP_BSS GENMASK(12, 8) + +#define SDW_SHIM_PCMSYCM_LCHN GENMASK(3, 0) +#define SDW_SHIM_PCMSYCM_HCHN GENMASK(7, 4) +#define SDW_SHIM_PCMSYCM_STREAM GENMASK(13, 8) +#define SDW_SHIM_PCMSYCM_DIR BIT(15) + +#define SDW_SHIM_PDMSCAP_ISS GENMASK(3, 0) +#define SDW_SHIM_PDMSCAP_OSS GENMASK(7, 4) +#define SDW_SHIM_PDMSCAP_BSS GENMASK(12, 8) +#define SDW_SHIM_PDMSCAP_CPSS GENMASK(15, 13) + +#define 
SDW_SHIM_IOCTL_MIF BIT(0) +#define SDW_SHIM_IOCTL_CO BIT(1) +#define SDW_SHIM_IOCTL_COE BIT(2) +#define SDW_SHIM_IOCTL_DO BIT(3) +#define SDW_SHIM_IOCTL_DOE BIT(4) +#define SDW_SHIM_IOCTL_BKE BIT(5) +#define SDW_SHIM_IOCTL_WPDD BIT(6) +#define SDW_SHIM_IOCTL_CIBD BIT(8) +#define SDW_SHIM_IOCTL_DIBD BIT(9) + +#define SDW_SHIM_CTMCTL_DACTQE BIT(0) +#define SDW_SHIM_CTMCTL_DODS BIT(1) +#define SDW_SHIM_CTMCTL_DOAIS GENMASK(4, 3) + +#define SDW_SHIM_WAKEEN_ENABLE BIT(0) +#define SDW_SHIM_WAKESTS_STATUS BIT(0) + +/* Intel ALH Register definitions */ +#define SDW_ALH_STRMZCFG(x) (0x000 + (0x4 * (x))) +#define SDW_ALH_NUM_STREAMS 64 + +#define SDW_ALH_STRMZCFG_DMAT_VAL 0x3 +#define SDW_ALH_STRMZCFG_DMAT GENMASK(7, 0) +#define SDW_ALH_STRMZCFG_CHN GENMASK(19, 16) + /** * struct sdw_intel_stream_params_data: configuration passed during * the @params_stream callback, e.g. for interaction with DSP @@ -116,6 +195,8 @@ struct sdw_intel_slave_id { * @link_list: list to handle interrupts across all links * @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers. * @shim_mask: flags to track initialization of SHIM shared registers + * @shim_base: sdw shim base. + * @alh_base: sdw alh base. */ struct sdw_intel_ctx { int count; @@ -128,6 +209,8 @@ struct sdw_intel_ctx { struct list_head link_list; struct mutex shim_lock; /* lock for access to shared SHIM registers */ u32 shim_mask; + u32 shim_base; + u32 alh_base; }; /** @@ -146,6 +229,8 @@ struct sdw_intel_ctx { * machine-specific quirks are handled in the DSP driver. * @clock_stop_quirks: mask array of possible behaviors requested by the * DSP driver. The quirks are common for all links for now. + * @shim_base: sdw shim base. + * @alh_base: sdw alh base. */ struct sdw_intel_res { int count; @@ -157,6 +242,8 @@ struct sdw_intel_res { struct device *dev; u32 link_mask; u32 clock_stop_quirks; + u32 shim_base; + u32 alh_base; }; /* diff --git a/include/linux/spi/max7301.h b/include/linux/spi/max7301.h index 433c20e2f46e..21449067aedb 100644 --- a/include/linux/spi/max7301.h +++ b/include/linux/spi/max7301.h @@ -2,7 +2,7 @@ #ifndef LINUX_SPI_MAX7301_H #define LINUX_SPI_MAX7301_H -#include <linux/gpio.h> +#include <linux/gpio/driver.h> /* * Some registers must be read back to modify. diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 97b8d12b5f2b..8371bca13729 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -147,7 +147,11 @@ extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer); * not using a GPIO line) * @word_delay: delay to be inserted between consecutive * words of a transfer - * + * @cs_setup: delay to be introduced by the controller after CS is asserted + * @cs_hold: delay to be introduced by the controller before CS is deasserted + * @cs_inactive: delay to be introduced by the controller after CS is + * deasserted. If @cs_change_delay is used from @spi_transfer, then the + * two delays will be added up. 
* @statistics: statistics for the spi_device * * A @spi_device is used to interchange data between an SPI slave @@ -188,6 +192,10 @@ struct spi_device { int cs_gpio; /* LEGACY: chip select gpio */ struct gpio_desc *cs_gpiod; /* chip select gpio desc */ struct spi_delay word_delay; /* inter-word delay */ + /* CS delays */ + struct spi_delay cs_setup; + struct spi_delay cs_hold; + struct spi_delay cs_inactive; /* the statistics */ struct spi_statistics statistics; @@ -339,6 +347,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * @max_speed_hz: Highest supported transfer speed * @flags: other constraints relevant to this driver * @slave: indicates that this is an SPI slave controller + * @devm_allocated: whether the allocation of this struct is devres-managed * @max_transfer_size: function that returns the max transfer size for * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used. * @max_message_size: function that returns the max message size for @@ -412,11 +421,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * controller has native support for memory like operations. * @unprepare_message: undo any work done by prepare_message(). * @slave_abort: abort the ongoing transfer request on an SPI slave controller - * @cs_setup: delay to be introduced by the controller after CS is asserted - * @cs_hold: delay to be introduced by the controller before CS is deasserted - * @cs_inactive: delay to be introduced by the controller after CS is - * deasserted. If @cs_change_delay is used from @spi_transfer, then the - * two delays will be added up. * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per * CS number. Any individual value may be -ENOENT for CS lines that * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods @@ -511,7 +515,7 @@ struct spi_controller { #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */ - /* flag indicating this is a non-devres managed controller */ + /* flag indicating if the allocation of this struct is devres-managed */ bool devm_allocated; /* flag indicating this is an SPI slave controller */ @@ -550,8 +554,7 @@ struct spi_controller { * to configure specific CS timing through spi_set_cs_timing() after * spi_setup(). */ - int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup, - struct spi_delay *hold, struct spi_delay *inactive); + int (*set_cs_timing)(struct spi_device *spi); /* bidirectional bulk transfers * @@ -638,11 +641,6 @@ struct spi_controller { /* Optimized handlers for SPI memory-like operations. */ const struct spi_controller_mem_ops *mem_ops; - /* CS delays */ - struct spi_delay cs_setup; - struct spi_delay cs_hold; - struct spi_delay cs_inactive; - /* gpio chip select */ int *cs_gpios; struct gpio_desc **cs_gpiods; diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 79897841a2cc..45310ea1b1d7 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -12,6 +12,8 @@ * asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the * initializers * + * linux/spinlock_types_raw: + * The raw types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -31,6 +33,8 @@ * contains the generic, simplified UP spinlock type. 
* (which is an empty structure on non-debug builds) * + * linux/spinlock_types_raw: + * The raw RT types and initializers * linux/spinlock_types.h: * defines the generic type and initializers * @@ -308,8 +312,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) 1 : ({ local_irq_restore(flags); 0; }); \ }) -/* Include rwlock functions */ +#ifndef CONFIG_PREEMPT_RT +/* Include rwlock functions for !RT */ #include <linux/rwlock.h> +#endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -320,6 +326,9 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # include <linux/spinlock_api_up.h> #endif +/* Non PREEMPT_RT kernel, map to raw spinlocks: */ +#ifndef CONFIG_PREEMPT_RT + /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ @@ -454,6 +463,10 @@ static __always_inline int spin_is_contended(spinlock_t *lock) #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) +#else /* !CONFIG_PREEMPT_RT */ +# include <linux/spinlock_rt.h> +#endif /* CONFIG_PREEMPT_RT */ + /* * Pull the atomic_t declaration: * (asm-mips/atomic.h needs above definitions) diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 19a9be9d97ee..6b8e1a0b137b 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -187,6 +187,9 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) return 0; } +/* PREEMPT_RT has its own rwlock implementation */ +#ifndef CONFIG_PREEMPT_RT #include <linux/rwlock_api_smp.h> +#endif #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h new file mode 100644 index 000000000000..835aedaf68ac --- /dev/null +++ b/include/linux/spinlock_rt.h @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +#ifndef __LINUX_SPINLOCK_RT_H +#define __LINUX_SPINLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. 
Use spinlock.h +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +extern void __rt_spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key, bool percpu); +#else +static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key, bool percpu) +{ +} +#endif + +#define spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_base_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key, false); \ +} while (0) + +#define local_spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_base_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key, true); \ +} while (0) + +extern void rt_spin_lock(spinlock_t *lock); +extern void rt_spin_lock_nested(spinlock_t *lock, int subclass); +extern void rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock); +extern void rt_spin_unlock(spinlock_t *lock); +extern void rt_spin_lock_unlock(spinlock_t *lock); +extern int rt_spin_trylock_bh(spinlock_t *lock); +extern int rt_spin_trylock(spinlock_t *lock); + +static __always_inline void spin_lock(spinlock_t *lock) +{ + rt_spin_lock(lock); +} + +#ifdef CONFIG_LOCKDEP +# define __spin_lock_nested(lock, subclass) \ + rt_spin_lock_nested(lock, subclass) + +# define __spin_lock_nest_lock(lock, nest_lock) \ + do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + rt_spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \ + } while (0) +# define __spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __spin_lock_nested(lock, subclass); \ + } while (0) + +#else + /* + * Always evaluate the 'subclass' argument to avoid that the compiler + * warns about set-but-not-used variables when building with + * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1. + */ +# define __spin_lock_nested(lock, subclass) spin_lock(((void)(subclass), (lock))) +# define __spin_lock_nest_lock(lock, subclass) spin_lock(((void)(subclass), (lock))) +# define __spin_lock_irqsave_nested(lock, flags, subclass) \ + spin_lock_irqsave(((void)(subclass), (lock)), flags) +#endif + +#define spin_lock_nested(lock, subclass) \ + __spin_lock_nested(lock, subclass) + +#define spin_lock_nest_lock(lock, nest_lock) \ + __spin_lock_nest_lock(lock, nest_lock) + +#define spin_lock_irqsave_nested(lock, flags, subclass) \ + __spin_lock_irqsave_nested(lock, flags, subclass) + +static __always_inline void spin_lock_bh(spinlock_t *lock) +{ + /* Investigate: Drop bh when blocking ? 
*/ + local_bh_disable(); + rt_spin_lock(lock); +} + +static __always_inline void spin_lock_irq(spinlock_t *lock) +{ + rt_spin_lock(lock); +} + +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) + +static __always_inline void spin_unlock(spinlock_t *lock) +{ + rt_spin_unlock(lock); +} + +static __always_inline void spin_unlock_bh(spinlock_t *lock) +{ + rt_spin_unlock(lock); + local_bh_enable(); +} + +static __always_inline void spin_unlock_irq(spinlock_t *lock) +{ + rt_spin_unlock(lock); +} + +static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, + unsigned long flags) +{ + rt_spin_unlock(lock); +} + +#define spin_trylock(lock) \ + __cond_lock(lock, rt_spin_trylock(lock)) + +#define spin_trylock_bh(lock) \ + __cond_lock(lock, rt_spin_trylock_bh(lock)) + +#define spin_trylock_irq(lock) \ + __cond_lock(lock, rt_spin_trylock(lock)) + +#define __spin_trylock_irqsave(lock, flags) \ +({ \ + int __locked; \ + \ + typecheck(unsigned long, flags); \ + flags = 0; \ + __locked = spin_trylock(lock); \ + __locked; \ +}) + +#define spin_trylock_irqsave(lock, flags) \ + __cond_lock(lock, __spin_trylock_irqsave(lock, flags)) + +#define spin_is_contended(lock) (((void)(lock), 0)) + +static inline int spin_is_locked(spinlock_t *lock) +{ + return rt_mutex_base_is_locked(&lock->lock); +} + +#define assert_spin_locked(lock) BUG_ON(!spin_is_locked(lock)) + +#include <linux/rwlock_rt.h> + +#endif diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index b981caafe8bf..2dfa35ffec76 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,65 +9,11 @@ * Released under the General Public License (GPL). */ -#if defined(CONFIG_SMP) -# include <asm/spinlock_types.h> -#else -# include <linux/spinlock_types_up.h> -#endif - -#include <linux/lockdep_types.h> +#include <linux/spinlock_types_raw.h> -typedef struct raw_spinlock { - arch_spinlock_t raw_lock; -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} raw_spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPINLOCK_OWNER_INIT ((void *)-1L) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define RAW_SPIN_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_SPIN, \ - } -# define SPIN_DEP_MAP_INIT(lockname) \ - .dep_map = { \ - .name = #lockname, \ - .wait_type_inner = LD_WAIT_CONFIG, \ - } -#else -# define RAW_SPIN_DEP_MAP_INIT(lockname) -# define SPIN_DEP_MAP_INIT(lockname) -#endif - -#ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_DEBUG_INIT(lockname) \ - .magic = SPINLOCK_MAGIC, \ - .owner_cpu = -1, \ - .owner = SPINLOCK_OWNER_INIT, -#else -# define SPIN_DEBUG_INIT(lockname) -#endif - -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ - { \ - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEBUG_INIT(lockname) \ - RAW_SPIN_DEP_MAP_INIT(lockname) } - -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +#ifndef CONFIG_PREEMPT_RT +/* Non PREEMPT_RT kernels map spinlock to raw_spinlock */ typedef struct spinlock { union { struct raw_spinlock rlock; @@ -96,6 +42,35 @@ typedef struct spinlock { #define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +#else /* !CONFIG_PREEMPT_RT */ + +/* PREEMPT_RT kernels map spinlock to rt_mutex */ +#include 
<linux/rtmutex.h> + +typedef struct spinlock { + struct rt_mutex_base lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; + +#define __SPIN_LOCK_UNLOCKED(name) \ + { \ + .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ + SPIN_DEP_MAP_INIT(name) \ + } + +#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \ + { \ + .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \ + LOCAL_SPIN_DEP_MAP_INIT(name) \ + } + +#define DEFINE_SPINLOCK(name) \ + spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + +#endif /* CONFIG_PREEMPT_RT */ + #include <linux/rwlock_types.h> #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h new file mode 100644 index 000000000000..91cb36b65a17 --- /dev/null +++ b/include/linux/spinlock_types_raw.h @@ -0,0 +1,73 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H +#define __LINUX_SPINLOCK_TYPES_RAW_H + +#include <linux/types.h> + +#if defined(CONFIG_SMP) +# include <asm/spinlock_types.h> +#else +# include <linux/spinlock_types_up.h> +#endif + +#include <linux/lockdep_types.h> + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RAW_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_SPIN, \ + } +# define SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + } + +# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \ + .dep_map = { \ + .name = #lockname, \ + .wait_type_inner = LD_WAIT_CONFIG, \ + .lock_type = LD_LOCK_PERCPU, \ + } +#else +# define RAW_SPIN_DEP_MAP_INIT(lockname) +# define SPIN_DEP_MAP_INIT(lockname) +# define LOCAL_SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +{ \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + RAW_SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +#endif /* __LINUX_SPINLOCK_TYPES_RAW_H */ diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index 0e0cf4d6a72a..6cfaa0a9a9b9 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h @@ -61,7 +61,7 @@ static inline int __srcu_read_lock(struct srcu_struct *ssp) int idx; idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; - WRITE_ONCE(ssp->srcu_lock_nesting[idx], ssp->srcu_lock_nesting[idx] + 1); + WRITE_ONCE(ssp->srcu_lock_nesting[idx], READ_ONCE(ssp->srcu_lock_nesting[idx]) + 1); return idx; } @@ -81,11 +81,11 @@ static inline void srcu_torture_stats_print(struct srcu_struct *ssp, { int idx; - idx = ((READ_ONCE(ssp->srcu_idx) + 1) & 0x2) >> 1; + idx = ((data_race(READ_ONCE(ssp->srcu_idx)) + 1) & 0x2) >> 1; pr_alert("%s%s Tiny SRCU per-CPU(idx=%d): (%hd,%hd)\n", tt, tf, idx, - READ_ONCE(ssp->srcu_lock_nesting[!idx]), - READ_ONCE(ssp->srcu_lock_nesting[idx])); + data_race(READ_ONCE(ssp->srcu_lock_nesting[!idx])), + data_race(READ_ONCE(ssp->srcu_lock_nesting[idx]))); } 
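/*
 * Illustrative aside, not part of the patches above: what the PREEMPT_RT
 * mapping introduced by <linux/spinlock_rt.h> means for ordinary spinlock
 * users.  The names my_lock/my_count are made up for this sketch.
 *
 * The same source builds for both configurations: on !PREEMPT_RT the lock is
 * a raw spinlock and "flags" carries the saved interrupt state; on PREEMPT_RT
 * the lock is backed by an rt_mutex, spin_lock_irqsave() merely zeroes
 * "flags", and the critical section stays preemptible, so nothing inside it
 * may rely on hard interrupts being disabled.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_count;

static void my_count_bump(void)
{
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);	/* rtmutex-based on PREEMPT_RT */
	my_count++;
	spin_unlock_irqrestore(&my_lock, flags);
}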
#endif diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 0d5a2691e7e9..f9b53acb4e02 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -7,7 +7,7 @@ #include <linux/types.h> #include <linux/spinlock.h> #include <linux/pci.h> -#include <linux/gpio.h> +#include <linux/gpio/driver.h> #include <linux/mod_devicetable.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> diff --git a/include/linux/ssb/ssb_driver_extif.h b/include/linux/ssb/ssb_driver_extif.h index 3f8bc973d67d..19253bfacd1a 100644 --- a/include/linux/ssb/ssb_driver_extif.h +++ b/include/linux/ssb/ssb_driver_extif.h @@ -197,7 +197,7 @@ struct ssb_extif { static inline bool ssb_extif_available(struct ssb_extif *extif) { - return 0; + return false; } static inline diff --git a/include/linux/stat.h b/include/linux/stat.h index fff27e603814..7df06931f25d 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h @@ -34,6 +34,10 @@ struct kstat { STATX_ATTR_ENCRYPTED | \ STATX_ATTR_VERITY \ )/* Attrs corresponding to FS_*_FL flags */ +#define KSTAT_ATTR_VFS_FLAGS \ + (STATX_ATTR_IMMUTABLE | \ + STATX_ATTR_APPEND \ + ) /* Attrs corresponding to S_* flags that are enforced by the VFS */ u64 ino; dev_t dev; dev_t rdev; diff --git a/include/linux/static_call.h b/include/linux/static_call.h index fc94faa53b5b..3e56a9751c06 100644 --- a/include/linux/static_call.h +++ b/include/linux/static_call.h @@ -17,11 +17,17 @@ * DECLARE_STATIC_CALL(name, func); * DEFINE_STATIC_CALL(name, func); * DEFINE_STATIC_CALL_NULL(name, typename); + * DEFINE_STATIC_CALL_RET0(name, typename); + * + * __static_call_return0; + * * static_call(name)(args...); * static_call_cond(name)(args...); * static_call_update(name, func); * static_call_query(name); * + * EXPORT_STATIC_CALL{,_TRAMP}{,_GPL}() + * * Usage example: * * # Start with the following functions (with identical prototypes): @@ -96,6 +102,33 @@ * To query which function is currently set to be called, use: * * func = static_call_query(name); + * + * + * DEFINE_STATIC_CALL_RET0 / __static_call_return0: + * + * Just like how DEFINE_STATIC_CALL_NULL() / static_call_cond() optimize the + * conditional void function call, DEFINE_STATIC_CALL_RET0 / + * __static_call_return0 optimize the do nothing return 0 function. + * + * This feature is strictly UB per the C standard (since it casts a function + * pointer to a different signature) and relies on the architecture ABI to + * make things work. In particular it relies on Caller Stack-cleanup and the + * whole return register being clobbered for short return values. All normal + * CDECL style ABIs conform. + * + * In particular the x86_64 implementation replaces the 5 byte CALL + * instruction at the callsite with a 5 byte clear of the RAX register, + * completely eliding any function call overhead. + * + * Notably argument setup is unconditional. + * + * + * EXPORT_STATIC_CALL() vs EXPORT_STATIC_CALL_TRAMP(): + * + * The difference is that the _TRAMP variant tries to only export the + * trampoline with the result that a module can use static_call{,_cond}() but + * not static_call_update(). 
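/*
 * Illustrative aside, not part of the patch: a minimal sketch of how the
 * DEFINE_STATIC_CALL_RET0() and EXPORT_STATIC_CALL_TRAMP() additions
 * documented above are typically used.  my_hook/my_real_impl are made-up
 * names; error handling and config guards are omitted.
 */
#include <linux/static_call.h>

static int my_real_impl(int arg)
{
	return arg + 1;
}

/* Starts life as "return 0"; the call site can be patched to a register clear. */
DEFINE_STATIC_CALL_RET0(my_hook, int (int));

int my_hook_user(int arg)
{
	return static_call(my_hook)(arg);	/* yields 0 until updated */
}

void my_hook_enable(void)
{
	static_call_update(my_hook, &my_real_impl);
}

/* Modules may invoke static_call(my_hook) but not static_call_update(): */
EXPORT_STATIC_CALL_TRAMP_GPL(my_hook);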
+ * */ #include <linux/types.h> diff --git a/include/linux/stdarg.h b/include/linux/stdarg.h new file mode 100644 index 000000000000..c8dc7f4f390c --- /dev/null +++ b/include/linux/stdarg.h @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#ifndef _LINUX_STDARG_H +#define _LINUX_STDARG_H + +typedef __builtin_va_list va_list; +#define va_start(v, l) __builtin_va_start(v, l) +#define va_end(v) __builtin_va_end(v) +#define va_arg(v, T) __builtin_va_arg(v, T) +#define va_copy(d, s) __builtin_va_copy(d, s) + +#endif diff --git a/include/linux/string.h b/include/linux/string.h index b48d2d28e0b1..5e96d656be7a 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -6,7 +6,7 @@ #include <linux/types.h> /* for size_t */ #include <linux/stddef.h> /* for NULL */ #include <linux/errno.h> /* for E2BIG */ -#include <stdarg.h> +#include <linux/stdarg.h> #include <uapi/linux/string.h> extern char *strndup_user(const char __user *, long); diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 8b5d5c97553e..a4661646adc9 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h @@ -14,6 +14,7 @@ #include <linux/socket.h> #include <linux/in.h> #include <linux/in6.h> +#include <linux/refcount.h> #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/sched.h> @@ -35,7 +36,7 @@ struct rpc_sysfs_client; * The high-level client handle */ struct rpc_clnt { - atomic_t cl_count; /* Number of references */ + refcount_t cl_count; /* Number of references */ unsigned int cl_clid; /* client id */ struct list_head cl_clients; /* Global list of clients */ struct list_head cl_tasks; /* List of tasks */ @@ -81,6 +82,7 @@ struct rpc_clnt { struct work_struct cl_work; }; const struct cred *cl_cred; + unsigned int cl_max_connect; /* max number of transports not to the same IP */ }; /* @@ -135,6 +137,7 @@ struct rpc_create_args { char *client_name; struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */ const struct cred *cred; + unsigned int max_connect; }; struct rpc_add_xprt_test { diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index 938c2bf29db8..02117ed0fa2e 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h @@ -20,6 +20,7 @@ enum rpc_auth_flavors { RPC_AUTH_DES = 3, RPC_AUTH_KRB = 4, RPC_AUTH_GSS = 6, + RPC_AUTH_TLS = 7, RPC_AUTH_MAXFLAVOR = 8, /* pseudoflavors: */ RPC_AUTH_GSS_KRB5 = 390003, diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index e91d51ea028b..064c96157d1f 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -19,6 +19,7 @@ #include <linux/sunrpc/svcauth.h> #include <linux/wait.h> #include <linux/mm.h> +#include <linux/pagevec.h> /* statistics for svc_pool structures */ struct svc_pool_stats { @@ -256,6 +257,7 @@ struct svc_rqst { struct page * *rq_next_page; /* next reply page to use */ struct page * *rq_page_end; /* one past the last page */ + struct pagevec rq_pvec; struct kvec rq_vec[RPCSVC_MAXPAGES]; /* generally useful.. 
*/ struct bio_vec rq_bvec[RPCSVC_MAXPAGES]; @@ -275,13 +277,13 @@ struct svc_rqst { #define RQ_VICTIM (5) /* about to be shut down */ #define RQ_BUSY (6) /* request is busy */ #define RQ_DATA (7) /* request has data */ -#define RQ_AUTHERR (8) /* Request status is auth error */ unsigned long rq_flags; /* flags field */ ktime_t rq_qtime; /* enqueue time */ void * rq_argp; /* decoded arguments */ void * rq_resp; /* xdr'd results */ void * rq_auth_data; /* flavor-specific data */ + __be32 rq_auth_stat; /* authentication status */ int rq_auth_slack; /* extra space xdr code * should leave in head * for krb5i, krb5p. @@ -502,6 +504,8 @@ struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node); struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node); +void svc_rqst_replace_page(struct svc_rqst *rqstp, + struct page *page); void svc_rqst_free(struct svc_rqst *); void svc_exit_thread(struct svc_rqst *); unsigned int svc_pool_map_get(void); @@ -523,6 +527,7 @@ void svc_wake_up(struct svc_serv *); void svc_reserve(struct svc_rqst *rqstp, int space); struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu); char * svc_print_addr(struct svc_rqst *, char *, size_t); +const char * svc_proc_name(const struct svc_rqst *rqstp); int svc_encode_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); @@ -532,7 +537,6 @@ unsigned int svc_fill_write_vector(struct svc_rqst *rqstp, char *svc_fill_symlink_pathname(struct svc_rqst *rqstp, struct kvec *first, void *p, size_t total); -__be32 svc_return_autherr(struct svc_rqst *rqstp, __be32 auth_err); __be32 svc_generic_init_request(struct svc_rqst *rqstp, const struct svc_program *progp, struct svc_process_info *procinfo); diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 3184465de3a0..24aa159d29a7 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -90,9 +90,9 @@ struct svcxprt_rdma { struct ib_pd *sc_pd; spinlock_t sc_send_lock; - struct list_head sc_send_ctxts; + struct llist_head sc_send_ctxts; spinlock_t sc_rw_ctxt_lock; - struct list_head sc_rw_ctxts; + struct llist_head sc_rw_ctxts; u32 sc_pending_recvs; u32 sc_recv_batch; @@ -150,7 +150,7 @@ struct svc_rdma_recv_ctxt { }; struct svc_rdma_send_ctxt { - struct list_head sc_list; + struct llist_node sc_node; struct rpc_rdma_cid sc_cid; struct ib_send_wr sc_send_wr; @@ -207,6 +207,7 @@ extern void svc_rdma_send_error_msg(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *sctxt, struct svc_rdma_recv_ctxt *rctxt, int status); +extern void svc_rdma_wake_send_waiters(struct svcxprt_rdma *rdma, int avail); extern int svc_rdma_sendto(struct svc_rqst *); extern int svc_rdma_result_payload(struct svc_rqst *rqstp, unsigned int offset, unsigned int length); diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index b0003866a249..6d9cc9080aca 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -127,7 +127,7 @@ struct auth_ops { char * name; struct module *owner; int flavour; - int (*accept)(struct svc_rqst *rq, __be32 *authp); + int (*accept)(struct svc_rqst *rq); int (*release)(struct svc_rqst *rq); void (*domain_release)(struct auth_domain *); int (*set_client)(struct svc_rqst *rq); @@ -149,7 +149,7 @@ struct auth_ops { struct svc_xprt; -extern int svc_authenticate(struct svc_rqst *rqstp, __be32 *authp); +extern int svc_authenticate(struct svc_rqst *rqstp); extern int svc_authorise(struct 
svc_rqst *rqstp); extern int svc_set_client(struct svc_rqst *rqstp); extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index a965cbc136ad..b519609af1d0 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -95,6 +95,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) #define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX) #define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT) #define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS) +#define rpc_auth_tls cpu_to_be32(RPC_AUTH_TLS) #define rpc_call cpu_to_be32(RPC_CALL) #define rpc_reply cpu_to_be32(RPC_REPLY) diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index c8c39f22d3b1..955ea4d7af0b 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h @@ -288,7 +288,6 @@ struct rpc_xprt { const char *address_strings[RPC_DISPLAY_MAX]; #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) struct dentry *debugfs; /* debugfs directory */ - atomic_t inject_disconnect; #endif struct rcu_head rcu; const struct xprt_class *xprt_class; @@ -432,6 +431,7 @@ void xprt_release_write(struct rpc_xprt *, struct rpc_task *); #define XPRT_CONGESTED (9) #define XPRT_CWND_WAIT (10) #define XPRT_WRITE_SPACE (11) +#define XPRT_SND_IS_COOKIE (12) static inline void xprt_set_connected(struct rpc_xprt *xprt) { @@ -502,21 +502,4 @@ static inline int xprt_test_and_set_binding(struct rpc_xprt *xprt) return test_and_set_bit(XPRT_BINDING, &xprt->state); } -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) -extern unsigned int rpc_inject_disconnect; -static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) -{ - if (!rpc_inject_disconnect) - return; - if (atomic_dec_return(&xprt->inject_disconnect)) - return; - atomic_set(&xprt->inject_disconnect, rpc_inject_disconnect); - xprt->ops->inject_disconnect(xprt); -} -#else -static inline void xprt_inject_disconnect(struct rpc_xprt *xprt) -{ -} -#endif - #endif /* _LINUX_SUNRPC_XPRT_H */ diff --git a/include/linux/sunrpc/xprtmultipath.h b/include/linux/sunrpc/xprtmultipath.h index b19addc8b715..bbb8a5fa0816 100644 --- a/include/linux/sunrpc/xprtmultipath.h +++ b/include/linux/sunrpc/xprtmultipath.h @@ -18,6 +18,7 @@ struct rpc_xprt_switch { unsigned int xps_id; unsigned int xps_nxprts; unsigned int xps_nactive; + unsigned int xps_nunique_destaddr_xprts; atomic_long_t xps_queuelen; struct list_head xps_xprt_list; diff --git a/include/linux/swap.h b/include/linux/swap.h index 6f5a43251593..ba52f3a3478e 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -408,7 +408,7 @@ static inline bool node_reclaim_enabled(void) extern void check_move_unevictable_pages(struct pagevec *pvec); -extern int kswapd_run(int nid); +extern void kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_SWAP @@ -721,7 +721,13 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem) #endif #if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) -extern void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask); +extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask); +static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) +{ + if (mem_cgroup_disabled()) + return; + __cgroup_throttle_swaprate(page, gfp_mask); +} #else static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) { @@ -730,8 +736,22 @@ static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) #ifdef CONFIG_MEMCG_SWAP extern void 
mem_cgroup_swapout(struct page *page, swp_entry_t entry); -extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); -extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); +extern int __mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry); +static inline int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry) +{ + if (mem_cgroup_disabled()) + return 0; + return __mem_cgroup_try_charge_swap(page, entry); +} + +extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); +static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) +{ + if (mem_cgroup_disabled()) + return; + __mem_cgroup_uncharge_swap(entry, nr_pages); +} + extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg); extern bool mem_cgroup_swap_full(struct page *page); #else diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 216854a5e513..b0cb2a9973f4 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h @@ -2,6 +2,7 @@ #ifndef __LINUX_SWIOTLB_H #define __LINUX_SWIOTLB_H +#include <linux/device.h> #include <linux/dma-direction.h> #include <linux/init.h> #include <linux/types.h> @@ -72,7 +73,8 @@ extern enum swiotlb_force swiotlb_force; * range check to see if the memory was in fact allocated by this * API. * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and - * @end. This is command line adjustable via setup_io_tlb_npages. + * @end. For default swiotlb, this is command line adjustable via + * setup_io_tlb_npages. * @used: The number of used IO TLB block. * @list: The free list describing the number of free entries available * from each index. @@ -83,6 +85,8 @@ extern enum swiotlb_force swiotlb_force; * unmap calls. * @debugfs: The dentry to debugfs. 
* @late_alloc: %true if allocated using the page allocator + * @force_bounce: %true if swiotlb bouncing is forced + * @for_alloc: %true if the pool is used for memory allocation */ struct io_tlb_mem { phys_addr_t start; @@ -93,29 +97,42 @@ struct io_tlb_mem { spinlock_t lock; struct dentry *debugfs; bool late_alloc; + bool force_bounce; + bool for_alloc; struct io_tlb_slot { phys_addr_t orig_addr; size_t alloc_size; unsigned int list; - } slots[]; + } *slots; }; -extern struct io_tlb_mem *io_tlb_default_mem; +extern struct io_tlb_mem io_tlb_default_mem; -static inline bool is_swiotlb_buffer(phys_addr_t paddr) +static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) { - struct io_tlb_mem *mem = io_tlb_default_mem; + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; return mem && paddr >= mem->start && paddr < mem->end; } +static inline bool is_swiotlb_force_bounce(struct device *dev) +{ + struct io_tlb_mem *mem = dev->dma_io_tlb_mem; + + return mem && mem->force_bounce; +} + void __init swiotlb_exit(void); unsigned int swiotlb_max_segment(void); size_t swiotlb_max_mapping_size(struct device *dev); -bool is_swiotlb_active(void); +bool is_swiotlb_active(struct device *dev); void __init swiotlb_adjust_size(unsigned long size); #else #define swiotlb_force SWIOTLB_NO_FORCE -static inline bool is_swiotlb_buffer(phys_addr_t paddr) +static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr) +{ + return false; +} +static inline bool is_swiotlb_force_bounce(struct device *dev) { return false; } @@ -131,7 +148,7 @@ static inline size_t swiotlb_max_mapping_size(struct device *dev) return SIZE_MAX; } -static inline bool is_swiotlb_active(void) +static inline bool is_swiotlb_active(struct device *dev) { return false; } @@ -144,4 +161,28 @@ static inline void swiotlb_adjust_size(unsigned long size) extern void swiotlb_print_info(void); extern void swiotlb_set_max_segment(unsigned int); +#ifdef CONFIG_DMA_RESTRICTED_POOL +struct page *swiotlb_alloc(struct device *dev, size_t size); +bool swiotlb_free(struct device *dev, struct page *page, size_t size); + +static inline bool is_swiotlb_for_alloc(struct device *dev) +{ + return dev->dma_io_tlb_mem->for_alloc; +} +#else +static inline struct page *swiotlb_alloc(struct device *dev, size_t size) +{ + return NULL; +} +static inline bool swiotlb_free(struct device *dev, struct page *page, + size_t size) +{ + return false; +} +static inline bool is_swiotlb_for_alloc(struct device *dev) +{ + return false; +} +#endif /* CONFIG_DMA_RESTRICTED_POOL */ + #endif /* __LINUX_SWIOTLB_H */ diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 69c9a7010081..252243c7783d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -915,6 +915,7 @@ asmlinkage long sys_mincore(unsigned long start, size_t len, asmlinkage long sys_madvise(unsigned long start, size_t len, int behavior); asmlinkage long sys_process_madvise(int pidfd, const struct iovec __user *vec, size_t vlen, int behavior, unsigned int flags); +asmlinkage long sys_process_mrelease(int pidfd, unsigned int flags); asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size, unsigned long prot, unsigned long pgoff, unsigned long flags); @@ -1158,7 +1159,6 @@ asmlinkage long sys_ustat(unsigned dev, struct ustat __user *ubuf); asmlinkage long sys_vfork(void); asmlinkage long sys_recv(int, void __user *, size_t, unsigned); asmlinkage long sys_send(int, void __user *, size_t, unsigned); -asmlinkage long sys_bdflush(int func, long data); 
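/*
 * Illustrative aside, not part of the patch: the swiotlb.h hunk above makes
 * the bounce-buffer lookups per device (dev->dma_io_tlb_mem) rather than
 * global, so DMA mapping code now passes the device through.  The helper
 * names below are made up for this sketch.
 */
#include <linux/swiotlb.h>

static bool my_map_must_bounce(struct device *dev)
{
	/* e.g. a restricted DMA pool or swiotlb=force: always bounce mappings */
	return is_swiotlb_force_bounce(dev);
}

static bool my_addr_is_bounced(struct device *dev, phys_addr_t paddr)
{
	/* unmap/sync paths: did this address come from dev's bounce pool? */
	return is_swiotlb_buffer(dev, paddr);
}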
asmlinkage long sys_oldumount(char __user *name); asmlinkage long sys_uselib(const char __user *library); asmlinkage long sys_sysfs(int option, @@ -1373,6 +1373,9 @@ long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems, unsigned int nsops, const struct old_timespec32 __user *timeout); +long __do_semtimedop(int semid, struct sembuf *tsems, unsigned int nsops, + const struct timespec64 *timeout, + struct ipc_namespace *ns); int __sys_getsockopt(int fd, int level, int optname, char __user *optval, int __user *optlen); diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index d99ca99837de..1fa2b69c6fc3 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h @@ -48,6 +48,8 @@ typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer, size_t *lenp, loff_t *ppos); int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *); +int proc_dobool(struct ctl_table *table, int write, void *buffer, + size_t *lenp, loff_t *ppos); int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *); int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *); diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index a12556a4b93a..e3f1e8ac1f85 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h @@ -176,7 +176,7 @@ struct bin_attribute { struct attribute attr; size_t size; void *private; - struct address_space *mapping; + struct address_space *(*f_mapping)(void); ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *, char *, loff_t, size_t); ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *, diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 54269e47ac9a..3ebfea0781f1 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h @@ -27,6 +27,7 @@ #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ #define TEE_SHM_KERNEL_MAPPED BIT(6) /* Memory mapped in kernel space */ +#define TEE_SHM_PRIV BIT(7) /* Memory private to TEE driver */ struct device; struct tee_device; @@ -332,6 +333,7 @@ void *tee_get_drvdata(struct tee_device *teedev); * @returns a pointer to 'struct tee_shm' */ struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); +struct tee_shm *tee_shm_alloc_kernel_buf(struct tee_context *ctx, size_t size); /** * tee_shm_register() - Register shared memory buffer diff --git a/include/linux/thermal.h b/include/linux/thermal.h index d296f3b88fb9..c314893970b3 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -285,7 +285,7 @@ struct thermal_zone_params { }; /** - * struct thermal_zone_of_device_ops - scallbacks for handling DT based zones + * struct thermal_zone_of_device_ops - callbacks for handling DT based zones * * Mandatory: * @get_temp: a pointer to a function that reads the sensor temperature. 
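/*
 * Illustrative aside, not part of the patch: the new proc_dobool() handler
 * declared in the sysctl.h hunk above plugs into a ctl_table like the other
 * proc_* helpers and parses 0/1 into a bool.  Table and variable names below
 * are made up; such a table would be registered with register_sysctl().
 */
#include <linux/sysctl.h>

static bool my_feature_enabled;

static struct ctl_table my_sysctls[] = {
	{
		.procname	= "my_feature_enabled",
		.data		= &my_feature_enabled,
		.maxlen		= sizeof(bool),
		.mode		= 0644,
		.proc_handler	= proc_dobool,
	},
	{ }
};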
@@ -404,12 +404,13 @@ static inline void thermal_zone_device_unregister( struct thermal_zone_device *tz) { } static inline struct thermal_cooling_device * -thermal_cooling_device_register(char *type, void *devdata, +thermal_cooling_device_register(const char *type, void *devdata, const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline struct thermal_cooling_device * thermal_of_cooling_device_register(struct device_node *np, - char *type, void *devdata, const struct thermal_cooling_device_ops *ops) + const char *type, void *devdata, + const struct thermal_cooling_device_ops *ops) { return ERR_PTR(-ENODEV); } static inline struct thermal_cooling_device * devm_thermal_of_cooling_device_register(struct device *dev, diff --git a/include/linux/threads.h b/include/linux/threads.h index 18d5a74bcc3d..c34173e6c5f1 100644 --- a/include/linux/threads.h +++ b/include/linux/threads.h @@ -38,7 +38,7 @@ * Define a minimum number of pids per cpu. Heuristically based * on original pid max of 32k for 32 cpus. Also, increase the * minimum settable value for pid_max on the running system based - * on similar defaults. See kernel/pid.c:pidmap_init() for details. + * on similar defaults. See kernel/pid.c:pid_idr_init() for details. */ #define PIDS_PER_CPU_DEFAULT 1024 #define PIDS_PER_CPU_MIN 8 diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h index e7c96c37174f..124e13cb1469 100644 --- a/include/linux/thunderbolt.h +++ b/include/linux/thunderbolt.h @@ -468,6 +468,7 @@ static inline struct tb_xdomain *tb_service_parent(struct tb_service *svc) * @interrupt_work: Work scheduled to handle ring interrupt when no * MSI-X is used. * @hop_count: Number of rings (end point hops) supported by NHI. + * @quirks: NHI specific quirks if any */ struct tb_nhi { spinlock_t lock; @@ -480,6 +481,7 @@ struct tb_nhi { bool going_away; struct work_struct interrupt_work; u32 hop_count; + unsigned long quirks; }; /** diff --git a/include/linux/time64.h b/include/linux/time64.h index 5117cb5b5656..81b9686a2079 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -25,7 +25,9 @@ struct itimerspec64 { #define TIME64_MIN (-TIME64_MAX - 1) #define KTIME_MAX ((s64)~((u64)1 << 63)) +#define KTIME_MIN (-KTIME_MAX - 1) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +#define KTIME_SEC_MIN (KTIME_MIN / NSEC_PER_SEC) /* * Limits for settimeofday(): @@ -124,10 +126,13 @@ static inline bool timespec64_valid_settod(const struct timespec64 *ts) */ static inline s64 timespec64_to_ns(const struct timespec64 *ts) { - /* Prevent multiplication overflow */ - if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) + /* Prevent multiplication overflow / underflow */ + if (ts->tv_sec >= KTIME_SEC_MAX) return KTIME_MAX; + if (ts->tv_sec <= KTIME_SEC_MIN) + return KTIME_MIN; + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; } diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index ad413b382a3c..3e475eeb5a99 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -310,8 +310,10 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT, TRACE_EVENT_FL_TRACEPOINT_BIT, + TRACE_EVENT_FL_DYNAMIC_BIT, TRACE_EVENT_FL_KPROBE_BIT, TRACE_EVENT_FL_UPROBE_BIT, + TRACE_EVENT_FL_EPROBE_BIT, }; /* @@ -321,8 +323,10 @@ enum { * NO_SET_FILTER - Set when filter has error and is to be ignored * IGNORE_ENABLE - For trace internal events, do not enable with debugfs file * TRACEPOINT - Event is a tracepoint + * DYNAMIC - Event is a dynamic 
event (created at run time) * KPROBE - Event is a kprobe * UPROBE - Event is a uprobe + * EPROBE - Event is an event probe */ enum { TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), @@ -330,8 +334,10 @@ enum { TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), TRACE_EVENT_FL_TRACEPOINT = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT), + TRACE_EVENT_FL_DYNAMIC = (1 << TRACE_EVENT_FL_DYNAMIC_BIT), TRACE_EVENT_FL_KPROBE = (1 << TRACE_EVENT_FL_KPROBE_BIT), TRACE_EVENT_FL_UPROBE = (1 << TRACE_EVENT_FL_UPROBE_BIT), + TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT), }; #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE) @@ -347,7 +353,14 @@ struct trace_event_call { struct trace_event event; char *print_fmt; struct event_filter *filter; - void *mod; + /* + * Static events can disappear with modules, + * where as dynamic ones need their own ref count. + */ + union { + void *module; + atomic_t refcnt; + }; void *data; /* See the TRACE_EVENT_FL_* flags above */ @@ -363,6 +376,42 @@ struct trace_event_call { #endif }; +#ifdef CONFIG_DYNAMIC_EVENTS +bool trace_event_dyn_try_get_ref(struct trace_event_call *call); +void trace_event_dyn_put_ref(struct trace_event_call *call); +bool trace_event_dyn_busy(struct trace_event_call *call); +#else +static inline bool trace_event_dyn_try_get_ref(struct trace_event_call *call) +{ + /* Without DYNAMIC_EVENTS configured, nothing should be calling this */ + return false; +} +static inline void trace_event_dyn_put_ref(struct trace_event_call *call) +{ +} +static inline bool trace_event_dyn_busy(struct trace_event_call *call) +{ + /* Nothing should call this without DYNAIMIC_EVENTS configured. */ + return true; +} +#endif + +static inline bool trace_event_try_get_ref(struct trace_event_call *call) +{ + if (call->flags & TRACE_EVENT_FL_DYNAMIC) + return trace_event_dyn_try_get_ref(call); + else + return try_module_get(call->module); +} + +static inline void trace_event_put_ref(struct trace_event_call *call) +{ + if (call->flags & TRACE_EVENT_FL_DYNAMIC) + trace_event_dyn_put_ref(call); + else + module_put(call->module); +} + #ifdef CONFIG_PERF_EVENTS static inline bool bpf_prog_array_valid(struct trace_event_call *call) { @@ -634,6 +683,7 @@ enum event_trigger_type { ETT_EVENT_ENABLE = (1 << 3), ETT_EVENT_HIST = (1 << 4), ETT_HIST_ENABLE = (1 << 5), + ETT_EVENT_EPROBE = (1 << 6), }; extern int filter_match_preds(struct event_filter *filter, void *rec); @@ -675,7 +725,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) #ifdef CONFIG_BPF_EVENTS unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); -int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); +int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); void perf_event_detach_bpf_prog(struct perf_event *event); int perf_event_query_prog_array(struct perf_event *event, void __user *info); int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog); @@ -692,7 +742,7 @@ static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *c } static inline int -perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog) +perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie) { return -EOPNOTSUPP; } @@ -803,6 +853,9 @@ extern void ftrace_profile_free_filter(struct perf_event *event); void perf_trace_buf_update(void 
*record, u16 type); void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp); +int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, u64 bpf_cookie); +void perf_event_free_bpf_prog(struct perf_event *event); + void bpf_trace_run1(struct bpf_prog *prog, u64 arg1); void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2); void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2, diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index ab58696d0ddd..28031b15f878 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -475,7 +475,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * * * * The declared 'local variable' is called '__entry' * * - * * __field(pid_t, prev_prid) is equivalent to a standard declaration: + * * __field(pid_t, prev_pid) is equivalent to a standard declaration: * * * * pid_t prev_pid; * * diff --git a/include/linux/tty.h b/include/linux/tty.h index 19dc1097e09c..168e57e40bbb 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -6,11 +6,12 @@ #include <linux/major.h> #include <linux/termios.h> #include <linux/workqueue.h> +#include <linux/tty_buffer.h> #include <linux/tty_driver.h> #include <linux/tty_ldisc.h> +#include <linux/tty_port.h> #include <linux/mutex.h> #include <linux/tty_flags.h> -#include <linux/seq_file.h> #include <uapi/linux/tty.h> #include <linux/rwsem.h> #include <linux/llist.h> @@ -31,54 +32,6 @@ */ #define __DISABLED_CHAR '\0' -struct tty_buffer { - union { - struct tty_buffer *next; - struct llist_node free; - }; - int used; - int size; - int commit; - int read; - int flags; - /* Data points here */ - unsigned long data[]; -}; - -/* Values for .flags field of tty_buffer */ -#define TTYB_NORMAL 1 /* buffer has no flags buffer */ - -static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) -{ - return ((unsigned char *)b->data) + ofs; -} - -static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs) -{ - return (char *)char_buf_ptr(b, ofs) + b->size; -} - -struct tty_bufhead { - struct tty_buffer *head; /* Queue head */ - struct work_struct work; - struct mutex lock; - atomic_t priority; - struct tty_buffer sentinel; - struct llist_head free; /* Free queue head */ - atomic_t mem_used; /* In-use buffers excluding free list */ - int mem_limit; - struct tty_buffer *tail; /* Active buffer */ -}; -/* - * When a break, frame error, or parity error happens, these codes are - * stuffed into the flags buffer. - */ -#define TTY_NORMAL 0 -#define TTY_BREAK 1 -#define TTY_FRAME 2 -#define TTY_PARITY 3 -#define TTY_OVERRUN 4 - #define INTR_CHAR(tty) ((tty)->termios.c_cc[VINTR]) #define QUIT_CHAR(tty) ((tty)->termios.c_cc[VQUIT]) #define ERASE_CHAR(tty) ((tty)->termios.c_cc[VERASE]) @@ -164,85 +117,6 @@ struct tty_bufhead { struct device; struct signal_struct; - -/* - * Port level information. Each device keeps its own port level information - * so provide a common structure for those ports wanting to use common support - * routines. - * - * The tty port has a different lifetime to the tty so must be kept apart. - * In addition be careful as tty -> port mappings are valid for the life - * of the tty object but in many cases port -> tty mappings are valid only - * until a hangup so don't use the wrong path. 
- */ - -struct tty_port; - -struct tty_port_operations { - /* Return 1 if the carrier is raised */ - int (*carrier_raised)(struct tty_port *port); - /* Control the DTR line */ - void (*dtr_rts)(struct tty_port *port, int raise); - /* Called when the last close completes or a hangup finishes - IFF the port was initialized. Do not use to free resources. Called - under the port mutex to serialize against activate/shutdowns */ - void (*shutdown)(struct tty_port *port); - /* Called under the port mutex from tty_port_open, serialized using - the port mutex */ - /* FIXME: long term getting the tty argument *out* of this would be - good for consoles */ - int (*activate)(struct tty_port *port, struct tty_struct *tty); - /* Called on the final put of a port */ - void (*destruct)(struct tty_port *port); -}; - -struct tty_port_client_operations { - int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t); - void (*write_wakeup)(struct tty_port *port); -}; - -extern const struct tty_port_client_operations tty_port_default_client_ops; - -struct tty_port { - struct tty_bufhead buf; /* Locked internally */ - struct tty_struct *tty; /* Back pointer */ - struct tty_struct *itty; /* internal back ptr */ - const struct tty_port_operations *ops; /* Port operations */ - const struct tty_port_client_operations *client_ops; /* Port client operations */ - spinlock_t lock; /* Lock protecting tty field */ - int blocked_open; /* Waiting to open */ - int count; /* Usage count */ - wait_queue_head_t open_wait; /* Open waiters */ - wait_queue_head_t delta_msr_wait; /* Modem status change */ - unsigned long flags; /* User TTY flags ASYNC_ */ - unsigned long iflags; /* Internal flags TTY_PORT_ */ - unsigned char console:1; /* port is a console */ - struct mutex mutex; /* Locking */ - struct mutex buf_mutex; /* Buffer alloc lock */ - unsigned char *xmit_buf; /* Optional buffer */ - unsigned int close_delay; /* Close port delay */ - unsigned int closing_wait; /* Delay for output */ - int drain_delay; /* Set to zero if no pure time - based drain is needed else - set to size of fifo */ - struct kref kref; /* Ref counter */ - void *client_data; -}; - -/* tty_port::iflags bits -- use atomic bit ops */ -#define TTY_PORT_INITIALIZED 0 /* device is initialized */ -#define TTY_PORT_SUSPENDED 1 /* device is suspended */ -#define TTY_PORT_ACTIVE 2 /* device is open */ - -/* - * uart drivers: use the uart_port::status field and the UPSTAT_* defines - * for s/w-based flow control steering and carrier detection status - */ -#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ -#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ -#define TTY_PORT_KOPENED 5 /* device exclusively opened by - kernel */ - struct tty_operations; /** @@ -446,15 +320,6 @@ extern const char *tty_driver_name(const struct tty_struct *tty); extern void tty_wait_until_sent(struct tty_struct *tty, long timeout); extern void stop_tty(struct tty_struct *tty); extern void start_tty(struct tty_struct *tty); -extern int tty_register_driver(struct tty_driver *driver); -extern void tty_unregister_driver(struct tty_driver *driver); -extern struct device *tty_register_device(struct tty_driver *driver, - unsigned index, struct device *dev); -extern struct device *tty_register_device_attr(struct tty_driver *driver, - unsigned index, struct device *device, - void *drvdata, - const struct attribute_group **attr_grp); -extern void tty_unregister_device(struct tty_driver *driver, unsigned index); extern void tty_write_message(struct 
tty_struct *tty, char *msg); extern int tty_send_xchar(struct tty_struct *tty, char ch); extern int tty_put_char(struct tty_struct *tty, unsigned char c); @@ -502,13 +367,7 @@ extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old); extern int tty_termios_hw_change(const struct ktermios *a, const struct ktermios *b); extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt); -extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); -extern void tty_ldisc_deref(struct tty_ldisc *); -extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); -extern const struct seq_operations tty_ldiscs_seq_ops; - extern void tty_wakeup(struct tty_struct *tty); -extern void tty_ldisc_flush(struct tty_struct *tty); extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned long arg); @@ -522,128 +381,6 @@ extern int tty_standard_install(struct tty_driver *driver, extern struct mutex tty_mutex; -extern void tty_port_init(struct tty_port *port); -extern void tty_port_link_device(struct tty_port *port, - struct tty_driver *driver, unsigned index); -extern struct device *tty_port_register_device(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device); -extern struct device *tty_port_register_device_attr(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device, void *drvdata, - const struct attribute_group **attr_grp); -extern struct device *tty_port_register_device_serdev(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device); -extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port, - struct tty_driver *driver, unsigned index, - struct device *device, void *drvdata, - const struct attribute_group **attr_grp); -extern void tty_port_unregister_device(struct tty_port *port, - struct tty_driver *driver, unsigned index); -extern int tty_port_alloc_xmit_buf(struct tty_port *port); -extern void tty_port_free_xmit_buf(struct tty_port *port); -extern void tty_port_destroy(struct tty_port *port); -extern void tty_port_put(struct tty_port *port); - -static inline struct tty_port *tty_port_get(struct tty_port *port) -{ - if (port && kref_get_unless_zero(&port->kref)) - return port; - return NULL; -} - -/* If the cts flow control is enabled, return true. 
*/ -static inline bool tty_port_cts_enabled(const struct tty_port *port) -{ - return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); -} - -static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val); -} - -static inline bool tty_port_active(const struct tty_port *port) -{ - return test_bit(TTY_PORT_ACTIVE, &port->iflags); -} - -static inline void tty_port_set_active(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_ACTIVE, &port->iflags, val); -} - -static inline bool tty_port_check_carrier(const struct tty_port *port) -{ - return test_bit(TTY_PORT_CHECK_CD, &port->iflags); -} - -static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val); -} - -static inline bool tty_port_suspended(const struct tty_port *port) -{ - return test_bit(TTY_PORT_SUSPENDED, &port->iflags); -} - -static inline void tty_port_set_suspended(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val); -} - -static inline bool tty_port_initialized(const struct tty_port *port) -{ - return test_bit(TTY_PORT_INITIALIZED, &port->iflags); -} - -static inline void tty_port_set_initialized(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val); -} - -static inline bool tty_port_kopened(const struct tty_port *port) -{ - return test_bit(TTY_PORT_KOPENED, &port->iflags); -} - -static inline void tty_port_set_kopened(struct tty_port *port, bool val) -{ - assign_bit(TTY_PORT_KOPENED, &port->iflags, val); -} - -extern struct tty_struct *tty_port_tty_get(struct tty_port *port); -extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); -extern int tty_port_carrier_raised(struct tty_port *port); -extern void tty_port_raise_dtr_rts(struct tty_port *port); -extern void tty_port_lower_dtr_rts(struct tty_port *port); -extern void tty_port_hangup(struct tty_port *port); -extern void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); -extern void tty_port_tty_wakeup(struct tty_port *port); -extern int tty_port_block_til_ready(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -extern int tty_port_close_start(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -extern void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); -extern void tty_port_close(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -extern int tty_port_install(struct tty_port *port, struct tty_driver *driver, - struct tty_struct *tty); -extern int tty_port_open(struct tty_port *port, - struct tty_struct *tty, struct file *filp); -static inline int tty_port_users(struct tty_port *port) -{ - return port->count + port->blocked_open; -} - -extern int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc); -extern void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc); -extern int tty_set_ldisc(struct tty_struct *tty, int disc); -extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, - const char *f, int count); - /* n_tty.c */ extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops); #ifdef CONFIG_TTY @@ -691,12 +428,4 @@ extern void tty_lock_slave(struct tty_struct *tty); extern void tty_unlock_slave(struct tty_struct *tty); extern void tty_set_lock_subclass(struct tty_struct *tty); -#ifdef CONFIG_PROC_FS -extern void proc_tty_register_driver(struct tty_driver *); -extern void proc_tty_unregister_driver(struct tty_driver *); -#else -static inline 
void proc_tty_register_driver(struct tty_driver *d) {} -static inline void proc_tty_unregister_driver(struct tty_driver *d) {} -#endif - #endif diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h new file mode 100644 index 000000000000..3b9d77604291 --- /dev/null +++ b/include/linux/tty_buffer.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TTY_BUFFER_H +#define _LINUX_TTY_BUFFER_H + +#include <linux/atomic.h> +#include <linux/llist.h> +#include <linux/mutex.h> +#include <linux/workqueue.h> + +struct tty_buffer { + union { + struct tty_buffer *next; + struct llist_node free; + }; + int used; + int size; + int commit; + int read; + int flags; + /* Data points here */ + unsigned long data[]; +}; + +/* Values for .flags field of tty_buffer */ +#define TTYB_NORMAL 1 /* buffer has no flags buffer */ + +static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs) +{ + return ((unsigned char *)b->data) + ofs; +} + +static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs) +{ + return (char *)char_buf_ptr(b, ofs) + b->size; +} + +struct tty_bufhead { + struct tty_buffer *head; /* Queue head */ + struct work_struct work; + struct mutex lock; + atomic_t priority; + struct tty_buffer sentinel; + struct llist_head free; /* Free queue head */ + atomic_t mem_used; /* In-use buffers excluding free list */ + int mem_limit; + struct tty_buffer *tail; /* Active buffer */ +}; + +/* + * When a break, frame error, or parity error happens, these codes are + * stuffed into the flags buffer. + */ +#define TTY_NORMAL 0 +#define TTY_BREAK 1 +#define TTY_FRAME 2 +#define TTY_PARITY 3 +#define TTY_OVERRUN 4 + +#endif diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 448f8ee6db6e..c20431d8def8 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -233,6 +233,7 @@ #include <linux/export.h> #include <linux/fs.h> +#include <linux/kref.h> #include <linux/list.h> #include <linux/cdev.h> #include <linux/termios.h> @@ -328,9 +329,6 @@ extern struct list_head tty_drivers; extern struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner, unsigned long flags); -extern void put_tty_driver(struct tty_driver *driver); -extern void tty_set_operations(struct tty_driver *driver, - const struct tty_operations *op); extern struct tty_driver *tty_find_polling_driver(char *name, int *line); extern void tty_driver_kref_put(struct tty_driver *driver); @@ -339,24 +337,18 @@ extern void tty_driver_kref_put(struct tty_driver *driver); #define tty_alloc_driver(lines, flags) \ __tty_alloc_driver(lines, THIS_MODULE, flags) -/* - * DEPRECATED Do not use this in new code, use tty_alloc_driver instead. - * (And change the return value checks.) 
- */ -static inline struct tty_driver *alloc_tty_driver(unsigned int lines) -{ - struct tty_driver *ret = tty_alloc_driver(lines, 0); - if (IS_ERR(ret)) - return NULL; - return ret; -} - static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) { kref_get(&d->kref); return d; } +static inline void tty_set_operations(struct tty_driver *driver, + const struct tty_operations *op) +{ + driver->ops = op; +} + /* tty driver magic number */ #define TTY_DRIVER_MAGIC 0x5402 @@ -434,4 +426,21 @@ static inline struct tty_driver *tty_driver_kref_get(struct tty_driver *d) /* serial subtype definitions */ #define SERIAL_TYPE_NORMAL 1 +int tty_register_driver(struct tty_driver *driver); +void tty_unregister_driver(struct tty_driver *driver); +struct device *tty_register_device(struct tty_driver *driver, unsigned index, + struct device *dev); +struct device *tty_register_device_attr(struct tty_driver *driver, + unsigned index, struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +void tty_unregister_device(struct tty_driver *driver, unsigned index); + +#ifdef CONFIG_PROC_FS +void proc_tty_register_driver(struct tty_driver *); +void proc_tty_unregister_driver(struct tty_driver *); +#else +static inline void proc_tty_register_driver(struct tty_driver *d) {} +static inline void proc_tty_unregister_driver(struct tty_driver *d) {} +#endif + #endif /* #ifdef _LINUX_TTY_DRIVER_H */ diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h index 67d78dc553e1..32284992b31a 100644 --- a/include/linux/tty_flip.h +++ b/include/linux/tty_flip.h @@ -2,7 +2,10 @@ #ifndef _LINUX_TTY_FLIP_H #define _LINUX_TTY_FLIP_H -#include <linux/tty.h> +#include <linux/tty_buffer.h> +#include <linux/tty_port.h> + +struct tty_ldisc; extern int tty_buffer_set_limit(struct tty_port *port, int limit); extern unsigned int tty_buffer_space_avail(struct tty_port *port); @@ -39,6 +42,9 @@ static inline int tty_insert_flip_string(struct tty_port *port, return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size); } +int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p, + const char *f, int count); + extern void tty_buffer_lock_exclusive(struct tty_port *port); extern void tty_buffer_unlock_exclusive(struct tty_port *port); diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index fbe9de278629..b1d812e902aa 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h @@ -2,6 +2,8 @@ #ifndef _LINUX_TTY_LDISC_H #define _LINUX_TTY_LDISC_H +struct tty_struct; + /* * This structure defines the interface between the tty line discipline * implementation and the tty routines. 
The following routines can be @@ -126,6 +128,9 @@ #include <linux/fs.h> #include <linux/wait.h> #include <linux/atomic.h> +#include <linux/list.h> +#include <linux/lockdep.h> +#include <linux/seq_file.h> /* * the semaphore definition @@ -220,4 +225,16 @@ struct tty_ldisc { #define MODULE_ALIAS_LDISC(ldisc) \ MODULE_ALIAS("tty-ldisc-" __stringify(ldisc)) +extern const struct seq_operations tty_ldiscs_seq_ops; + +struct tty_ldisc *tty_ldisc_ref(struct tty_struct *); +void tty_ldisc_deref(struct tty_ldisc *); +struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *); + +void tty_ldisc_flush(struct tty_struct *tty); + +int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc); +void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc); +int tty_set_ldisc(struct tty_struct *tty, int disc); + #endif /* _LINUX_TTY_LDISC_H */ diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h new file mode 100644 index 000000000000..6e86e9e118b6 --- /dev/null +++ b/include/linux/tty_port.h @@ -0,0 +1,208 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_TTY_PORT_H +#define _LINUX_TTY_PORT_H + +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/tty_buffer.h> +#include <linux/wait.h> + +/* + * Port level information. Each device keeps its own port level information + * so provide a common structure for those ports wanting to use common support + * routines. + * + * The tty port has a different lifetime to the tty so must be kept apart. + * In addition be careful as tty -> port mappings are valid for the life + * of the tty object but in many cases port -> tty mappings are valid only + * until a hangup so don't use the wrong path. + */ + +struct attribute_group; +struct tty_driver; +struct tty_port; +struct tty_struct; + +struct tty_port_operations { + /* Return 1 if the carrier is raised */ + int (*carrier_raised)(struct tty_port *port); + /* Control the DTR line */ + void (*dtr_rts)(struct tty_port *port, int raise); + /* Called when the last close completes or a hangup finishes + IFF the port was initialized. Do not use to free resources. 
Called + under the port mutex to serialize against activate/shutdowns */ + void (*shutdown)(struct tty_port *port); + /* Called under the port mutex from tty_port_open, serialized using + the port mutex */ + /* FIXME: long term getting the tty argument *out* of this would be + good for consoles */ + int (*activate)(struct tty_port *port, struct tty_struct *tty); + /* Called on the final put of a port */ + void (*destruct)(struct tty_port *port); +}; + +struct tty_port_client_operations { + int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t); + void (*write_wakeup)(struct tty_port *port); +}; + +extern const struct tty_port_client_operations tty_port_default_client_ops; + +struct tty_port { + struct tty_bufhead buf; /* Locked internally */ + struct tty_struct *tty; /* Back pointer */ + struct tty_struct *itty; /* internal back ptr */ + const struct tty_port_operations *ops; /* Port operations */ + const struct tty_port_client_operations *client_ops; /* Port client operations */ + spinlock_t lock; /* Lock protecting tty field */ + int blocked_open; /* Waiting to open */ + int count; /* Usage count */ + wait_queue_head_t open_wait; /* Open waiters */ + wait_queue_head_t delta_msr_wait; /* Modem status change */ + unsigned long flags; /* User TTY flags ASYNC_ */ + unsigned long iflags; /* Internal flags TTY_PORT_ */ + unsigned char console:1; /* port is a console */ + struct mutex mutex; /* Locking */ + struct mutex buf_mutex; /* Buffer alloc lock */ + unsigned char *xmit_buf; /* Optional buffer */ + unsigned int close_delay; /* Close port delay */ + unsigned int closing_wait; /* Delay for output */ + int drain_delay; /* Set to zero if no pure time + based drain is needed else + set to size of fifo */ + struct kref kref; /* Ref counter */ + void *client_data; +}; + +/* tty_port::iflags bits -- use atomic bit ops */ +#define TTY_PORT_INITIALIZED 0 /* device is initialized */ +#define TTY_PORT_SUSPENDED 1 /* device is suspended */ +#define TTY_PORT_ACTIVE 2 /* device is open */ + +/* + * uart drivers: use the uart_port::status field and the UPSTAT_* defines + * for s/w-based flow control steering and carrier detection status + */ +#define TTY_PORT_CTS_FLOW 3 /* h/w flow control enabled */ +#define TTY_PORT_CHECK_CD 4 /* carrier detect enabled */ +#define TTY_PORT_KOPENED 5 /* device exclusively opened by + kernel */ + +void tty_port_init(struct tty_port *port); +void tty_port_link_device(struct tty_port *port, struct tty_driver *driver, + unsigned index); +struct device *tty_port_register_device(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device); +struct device *tty_port_register_device_attr(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +struct device *tty_port_register_device_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device); +struct device *tty_port_register_device_attr_serdev(struct tty_port *port, + struct tty_driver *driver, unsigned index, + struct device *device, void *drvdata, + const struct attribute_group **attr_grp); +void tty_port_unregister_device(struct tty_port *port, + struct tty_driver *driver, unsigned index); +int tty_port_alloc_xmit_buf(struct tty_port *port); +void tty_port_free_xmit_buf(struct tty_port *port); +void tty_port_destroy(struct tty_port *port); +void tty_port_put(struct tty_port *port); + +static inline struct tty_port 
*tty_port_get(struct tty_port *port) +{ + if (port && kref_get_unless_zero(&port->kref)) + return port; + return NULL; +} + +/* If the cts flow control is enabled, return true. */ +static inline bool tty_port_cts_enabled(const struct tty_port *port) +{ + return test_bit(TTY_PORT_CTS_FLOW, &port->iflags); +} + +static inline void tty_port_set_cts_flow(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_CTS_FLOW, &port->iflags, val); +} + +static inline bool tty_port_active(const struct tty_port *port) +{ + return test_bit(TTY_PORT_ACTIVE, &port->iflags); +} + +static inline void tty_port_set_active(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_ACTIVE, &port->iflags, val); +} + +static inline bool tty_port_check_carrier(const struct tty_port *port) +{ + return test_bit(TTY_PORT_CHECK_CD, &port->iflags); +} + +static inline void tty_port_set_check_carrier(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_CHECK_CD, &port->iflags, val); +} + +static inline bool tty_port_suspended(const struct tty_port *port) +{ + return test_bit(TTY_PORT_SUSPENDED, &port->iflags); +} + +static inline void tty_port_set_suspended(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_SUSPENDED, &port->iflags, val); +} + +static inline bool tty_port_initialized(const struct tty_port *port) +{ + return test_bit(TTY_PORT_INITIALIZED, &port->iflags); +} + +static inline void tty_port_set_initialized(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_INITIALIZED, &port->iflags, val); +} + +static inline bool tty_port_kopened(const struct tty_port *port) +{ + return test_bit(TTY_PORT_KOPENED, &port->iflags); +} + +static inline void tty_port_set_kopened(struct tty_port *port, bool val) +{ + assign_bit(TTY_PORT_KOPENED, &port->iflags, val); +} + +struct tty_struct *tty_port_tty_get(struct tty_port *port); +void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty); +int tty_port_carrier_raised(struct tty_port *port); +void tty_port_raise_dtr_rts(struct tty_port *port); +void tty_port_lower_dtr_rts(struct tty_port *port); +void tty_port_hangup(struct tty_port *port); +void tty_port_tty_hangup(struct tty_port *port, bool check_clocal); +void tty_port_tty_wakeup(struct tty_port *port); +int tty_port_block_til_ready(struct tty_port *port, struct tty_struct *tty, + struct file *filp); +int tty_port_close_start(struct tty_port *port, struct tty_struct *tty, + struct file *filp); +void tty_port_close_end(struct tty_port *port, struct tty_struct *tty); +void tty_port_close(struct tty_port *port, struct tty_struct *tty, + struct file *filp); +int tty_port_install(struct tty_port *port, struct tty_driver *driver, + struct tty_struct *tty); +int tty_port_open(struct tty_port *port, struct tty_struct *tty, + struct file *filp); + +static inline int tty_port_users(struct tty_port *port) +{ + return port->count + port->blocked_open; +} + +#endif diff --git a/include/linux/typecheck.h b/include/linux/typecheck.h index 20d310331eb5..46b15e2aaefb 100644 --- a/include/linux/typecheck.h +++ b/include/linux/typecheck.h @@ -22,4 +22,13 @@ (void)__tmp; \ }) +/* + * Check at compile time that something is a pointer type. 
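/*
 * Editor's sketch (not part of the patch): illustrative use of the
 * typecheck_pointer() macro whose body follows just below. When it compiles
 * it evaluates to 1; for a non-pointer argument the build fails because
 * sizeof(*__dummy) cannot be formed. Variable names are invented.
 */
static inline void typecheck_pointer_example(void)
{
	int value = 0;
	int *ptr = &value;

	(void)typecheck_pointer(ptr);	/* compiles: ptr is a pointer type */
	/* (void)typecheck_pointer(value); -- would break the build: not a pointer */
	(void)ptr;
}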
+ */ +#define typecheck_pointer(x) \ +({ typeof(x) __dummy; \ + (void)sizeof(*__dummy); \ + 1; \ +}) + #endif /* TYPECHECK_H_INCLUDED */ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index c05e903cef02..ac0394087f7d 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -200,16 +200,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n) n = _copy_to_user(to, from, n); return n; } -#ifdef CONFIG_COMPAT -static __always_inline unsigned long __must_check -copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - might_fault(); - if (access_ok(to, n) && access_ok(from, n)) - n = raw_copy_in_user(to, from, n); - return n; -} -#endif #ifndef copy_mc_to_kernel /* diff --git a/include/linux/uio.h b/include/linux/uio.h index 82c3c3e819e0..5265024e8b90 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -47,6 +47,7 @@ struct iov_iter { }; loff_t xarray_start; }; + size_t truncated; }; static inline enum iter_type iov_iter_type(const struct iov_iter *i) @@ -254,8 +255,10 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count) * conversion in assignement is by definition greater than all * values of size_t, including old i->count. */ - if (i->count > count) + if (i->count > count) { + i->truncated += i->count - count; i->count = count; + } } /* @@ -264,6 +267,7 @@ static inline void iov_iter_truncate(struct iov_iter *i, u64 count) */ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count) { + i->truncated -= count - i->count; i->count = count; } diff --git a/include/linux/units.h b/include/linux/units.h index dcc30a53fa93..681fc652e3d7 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -4,9 +4,29 @@ #include <linux/math.h> -#define MILLIWATT_PER_WATT 1000L -#define MICROWATT_PER_MILLIWATT 1000L -#define MICROWATT_PER_WATT 1000000L +/* Metric prefixes in accordance with Système international (d'unités) */ +#define PETA 1000000000000000ULL +#define TERA 1000000000000ULL +#define GIGA 1000000000UL +#define MEGA 1000000UL +#define KILO 1000UL +#define HECTO 100UL +#define DECA 10UL +#define DECI 10UL +#define CENTI 100UL +#define MILLI 1000UL +#define MICRO 1000000UL +#define NANO 1000000000UL +#define PICO 1000000000000ULL +#define FEMTO 1000000000000000ULL + +#define HZ_PER_KHZ 1000UL +#define KHZ_PER_MHZ 1000UL +#define HZ_PER_MHZ 1000000UL + +#define MILLIWATT_PER_WATT 1000UL +#define MICROWATT_PER_MILLIWATT 1000UL +#define MICROWATT_PER_WATT 1000000UL #define ABSOLUTE_ZERO_MILLICELSIUS -273150 diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h index ead8c9a47c6a..8fc2abd7aecb 100644 --- a/include/linux/usb/audio-v2.h +++ b/include/linux/usb/audio-v2.h @@ -156,6 +156,20 @@ struct uac2_feature_unit_descriptor { __u8 bmaControls[]; /* variable length */ } __attribute__((packed)); +#define UAC2_DT_FEATURE_UNIT_SIZE(ch) (6 + ((ch) + 1) * 4) + +/* As above, but more useful for defining your own descriptors: */ +#define DECLARE_UAC2_FEATURE_UNIT_DESCRIPTOR(ch) \ +struct uac2_feature_unit_descriptor_##ch { \ + __u8 bLength; \ + __u8 bDescriptorType; \ + __u8 bDescriptorSubtype; \ + __u8 bUnitID; \ + __u8 bSourceID; \ + __le32 bmaControls[ch + 1]; \ + __u8 iFeature; \ +} __packed + /* 4.7.2.10 Effect Unit Descriptor */ struct uac2_effect_unit_descriptor { diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h index 78e006355557..c892c5bc6638 100644 --- a/include/linux/usb/ehci_def.h +++ b/include/linux/usb/ehci_def.h @@ -45,6 +45,7 @@ struct ehci_caps 
{ #define HCS_PORTROUTED(p) ((p)&(1 << 7)) /* true: port routing */ #define HCS_PPC(p) ((p)&(1 << 4)) /* true: port power control */ #define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */ +#define HCS_N_PORTS_MAX 15 /* N_PORTS valid 0x1-0xF */ u32 hcc_params; /* HCCPARAMS - offset 0x8 */ /* EHCI 1.1 addendum */ @@ -126,8 +127,9 @@ struct ehci_regs { u32 configured_flag; #define FLAG_CF (1<<0) /* true: we'll support "high speed" */ - /* PORTSC: offset 0x44 */ - u32 port_status[0]; /* up to N_PORTS */ + union { + /* PORTSC: offset 0x44 */ + u32 port_status[HCS_N_PORTS_MAX]; /* up to N_PORTS */ /* EHCI 1.1 addendum */ #define PORTSC_SUSPEND_STS_ACK 0 #define PORTSC_SUSPEND_STS_NYET 1 @@ -164,28 +166,35 @@ struct ehci_regs { #define PORT_CSC (1<<1) /* connect status change */ #define PORT_CONNECT (1<<0) /* device connected */ #define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC) - - u32 reserved3[9]; - - /* USBMODE: offset 0x68 */ - u32 usbmode; /* USB Device mode */ + struct { + u32 reserved3[9]; + /* USBMODE: offset 0x68 */ + u32 usbmode; /* USB Device mode */ + }; #define USBMODE_SDIS (1<<3) /* Stream disable */ #define USBMODE_BE (1<<2) /* BE/LE endianness select */ #define USBMODE_CM_HC (3<<0) /* host controller mode */ #define USBMODE_CM_IDLE (0<<0) /* idle state */ - - u32 reserved4[6]; + }; /* Moorestown has some non-standard registers, partially due to the fact that * its EHCI controller has both TT and LPM support. HOSTPCx are extensions to * PORTSCx */ - /* HOSTPC: offset 0x84 */ - u32 hostpc[0]; /* HOSTPC extension */ + union { + struct { + u32 reserved4; + /* HOSTPC: offset 0x84 */ + u32 hostpc[HCS_N_PORTS_MAX]; #define HOSTPC_PHCD (1<<22) /* Phy clock disable */ #define HOSTPC_PSPD (3<<25) /* Port speed detection */ + }; + + /* Broadcom-proprietary USB_EHCI_INSNREG00 @ 0x80 */ + u32 brcm_insnreg[4]; + }; - u32 reserved5[17]; + u32 reserved5[2]; /* USBMODE_EX: offset 0xc8 */ u32 usbmode_ex; /* USB Device mode extension */ diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 75c7538e350a..10fe57cf40be 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -329,6 +329,7 @@ struct usb_gadget_ops { struct usb_ep *(*match_ep)(struct usb_gadget *, struct usb_endpoint_descriptor *, struct usb_ss_ep_comp_descriptor *); + int (*check_config)(struct usb_gadget *gadget); }; /** @@ -491,7 +492,7 @@ extern char *usb_get_gadget_udc_name(void); */ static inline size_t usb_ep_align(struct usb_ep *ep, size_t len) { - int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc) & 0x7ff; + int max_packet_size = (size_t)usb_endpoint_maxp(ep->desc); return round_up(len, max_packet_size); } @@ -608,6 +609,7 @@ int usb_gadget_connect(struct usb_gadget *gadget); int usb_gadget_disconnect(struct usb_gadget *gadget); int usb_gadget_deactivate(struct usb_gadget *gadget); int usb_gadget_activate(struct usb_gadget *gadget); +int usb_gadget_check_config(struct usb_gadget *gadget); #else static inline int usb_gadget_frame_number(struct usb_gadget *gadget) { return 0; } @@ -631,6 +633,8 @@ static inline int usb_gadget_deactivate(struct usb_gadget *gadget) { return 0; } static inline int usb_gadget_activate(struct usb_gadget *gadget) { return 0; } +static inline int usb_gadget_check_config(struct usb_gadget *gadget) +{ return 0; } #endif /* CONFIG_USB_GADGET */ /*-------------------------------------------------------------------------*/ diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h index 3aee78dda16d..784659d4dc99 100644 --- 
a/include/linux/usb/otg-fsm.h +++ b/include/linux/usb/otg-fsm.h @@ -196,6 +196,7 @@ struct otg_fsm { struct mutex lock; u8 *host_req_flag; struct delayed_work hnp_polling_work; + bool hnp_work_inited; bool state_changed; }; diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 7ceeecbb9e02..6475f880be37 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h @@ -128,5 +128,6 @@ enum usb_dr_mode { * and returns the corresponding enum usb_dr_mode */ extern enum usb_dr_mode usb_get_dr_mode(struct device *dev); +extern enum usb_dr_mode usb_get_role_switch_default_mode(struct device *dev); #endif /* __LINUX_USB_OTG_H */ diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 331d2ccf0bcc..33cea484d1ad 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -60,16 +60,16 @@ extern int mfill_atomic_install_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd, extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long src_start, unsigned long len, - bool *mmap_changing, __u64 mode); + atomic_t *mmap_changing, __u64 mode); extern ssize_t mfill_zeropage(struct mm_struct *dst_mm, unsigned long dst_start, unsigned long len, - bool *mmap_changing); + atomic_t *mmap_changing); extern ssize_t mcopy_continue(struct mm_struct *dst_mm, unsigned long dst_start, - unsigned long len, bool *mmap_changing); + unsigned long len, atomic_t *mmap_changing); extern int mwriteprotect_range(struct mm_struct *dst_mm, unsigned long start, unsigned long len, - bool enable_wp, bool *mmap_changing); + bool enable_wp, atomic_t *mmap_changing); /* mm helpers */ static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma, diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h index 3357ac98878d..3972ab765de1 100644 --- a/include/linux/vdpa.h +++ b/include/linux/vdpa.h @@ -43,17 +43,17 @@ struct vdpa_vq_state_split { * @last_used_idx: used index */ struct vdpa_vq_state_packed { - u16 last_avail_counter:1; - u16 last_avail_idx:15; - u16 last_used_counter:1; - u16 last_used_idx:15; + u16 last_avail_counter:1; + u16 last_avail_idx:15; + u16 last_used_counter:1; + u16 last_used_idx:15; }; struct vdpa_vq_state { - union { - struct vdpa_vq_state_split split; - struct vdpa_vq_state_packed packed; - }; + union { + struct vdpa_vq_state_split split; + struct vdpa_vq_state_packed packed; + }; }; struct vdpa_mgmt_dev; @@ -65,6 +65,7 @@ struct vdpa_mgmt_dev; * @config: the configuration ops for this device. * @index: device index * @features_valid: were features initialized? for legacy guests + * @use_va: indicate whether virtual address must be used by this device * @nvqs: maximum number of supported virtqueues * @mdev: management device pointer; caller must setup when registering device as part * of dev_add() mgmtdev ops callback before invoking _vdpa_register_device(). @@ -75,6 +76,7 @@ struct vdpa_device { const struct vdpa_config_ops *config; unsigned int index; bool features_valid; + bool use_va; int nvqs; struct vdpa_mgmt_dev *mdev; }; @@ -90,6 +92,16 @@ struct vdpa_iova_range { }; /** + * Corresponding file area for device memory mapping + * @file: vma->vm_file for the mapping + * @offset: mapping offset in the vm_file + */ +struct vdpa_map_file { + struct file *file; + u64 offset; +}; + +/** * struct vdpa_config_ops - operations for configuring a vDPA device. 
* Note: vDPA device drivers are required to implement all of the * operations unless it is mentioned to be optional in the following @@ -131,7 +143,7 @@ struct vdpa_iova_range { * @vdev: vdpa device * @idx: virtqueue index * @state: pointer to returned state (last_avail_idx) - * @get_vq_notification: Get the notification area for a virtqueue + * @get_vq_notification: Get the notification area for a virtqueue * @vdev: vdpa device * @idx: virtqueue index * Returns the notifcation area @@ -171,6 +183,9 @@ struct vdpa_iova_range { * @set_status: Set the device status * @vdev: vdpa device * @status: virtio device status + * @reset: Reset device + * @vdev: vdpa device + * Returns integer: success (0) or error (< 0) * @get_config_size: Get the size of the configuration space * @vdev: vdpa device * Returns size_t: configuration size @@ -255,6 +270,7 @@ struct vdpa_config_ops { u32 (*get_vendor_id)(struct vdpa_device *vdev); u8 (*get_status)(struct vdpa_device *vdev); void (*set_status)(struct vdpa_device *vdev, u8 status); + int (*reset)(struct vdpa_device *vdev); size_t (*get_config_size)(struct vdpa_device *vdev); void (*get_config)(struct vdpa_device *vdev, unsigned int offset, void *buf, unsigned int len); @@ -266,7 +282,7 @@ struct vdpa_config_ops { /* DMA ops */ int (*set_map)(struct vdpa_device *vdev, struct vhost_iotlb *iotlb); int (*dma_map)(struct vdpa_device *vdev, u64 iova, u64 size, - u64 pa, u32 perm); + u64 pa, u32 perm, void *opaque); int (*dma_unmap)(struct vdpa_device *vdev, u64 iova, u64 size); /* Free device resources */ @@ -275,14 +291,27 @@ struct vdpa_config_ops { struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, - size_t size, const char *name); + size_t size, const char *name, + bool use_va); -#define vdpa_alloc_device(dev_struct, member, parent, config, name) \ +/** + * vdpa_alloc_device - allocate and initilaize a vDPA device + * + * @dev_struct: the type of the parent structure + * @member: the name of struct vdpa_device within the @dev_struct + * @parent: the parent device + * @config: the bus operations that is supported by this device + * @name: name of the vdpa device + * @use_va: indicate whether virtual address must be used by this device + * + * Return allocated data structure or ERR_PTR upon error + */ +#define vdpa_alloc_device(dev_struct, member, parent, config, name, use_va) \ container_of(__vdpa_alloc_device( \ parent, config, \ sizeof(dev_struct) + \ BUILD_BUG_ON_ZERO(offsetof( \ - dev_struct, member)), name), \ + dev_struct, member)), name, use_va), \ dev_struct, member) int vdpa_register_device(struct vdpa_device *vdev, int nvqs); @@ -337,27 +366,27 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev) return vdev->dma_dev; } -static inline void vdpa_reset(struct vdpa_device *vdev) +static inline int vdpa_reset(struct vdpa_device *vdev) { - const struct vdpa_config_ops *ops = vdev->config; + const struct vdpa_config_ops *ops = vdev->config; vdev->features_valid = false; - ops->set_status(vdev, 0); + return ops->reset(vdev); } static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features) { - const struct vdpa_config_ops *ops = vdev->config; + const struct vdpa_config_ops *ops = vdev->config; vdev->features_valid = true; - return ops->set_features(vdev, features); + return ops->set_features(vdev, features); } - -static inline void vdpa_get_config(struct vdpa_device *vdev, unsigned offset, - void *buf, unsigned int len) +static inline void vdpa_get_config(struct vdpa_device 
*vdev, + unsigned int offset, void *buf, + unsigned int len) { - const struct vdpa_config_ops *ops = vdev->config; + const struct vdpa_config_ops *ops = vdev->config; /* * Config accesses aren't supposed to trigger before features are set. diff --git a/include/linux/vfio.h b/include/linux/vfio.h index a2c5b30e1763..b53a9557884a 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -15,13 +15,28 @@ #include <linux/poll.h> #include <uapi/linux/vfio.h> +/* + * VFIO devices can be placed in a set, this allows all devices to share this + * structure and the VFIO core will provide a lock that is held around + * open_device()/close_device() for all devices in the set. + */ +struct vfio_device_set { + void *set_id; + struct mutex lock; + struct list_head device_list; + unsigned int device_count; +}; + struct vfio_device { struct device *dev; const struct vfio_device_ops *ops; struct vfio_group *group; + struct vfio_device_set *dev_set; + struct list_head dev_set_list; /* Members below here are private, not for driver use */ refcount_t refcount; + unsigned int open_count; struct completion comp; struct list_head group_next; }; @@ -29,8 +44,8 @@ struct vfio_device { /** * struct vfio_device_ops - VFIO bus driver device callbacks * - * @open: Called when userspace creates new file descriptor for device - * @release: Called when userspace releases file descriptor for device + * @open_device: Called when the first file descriptor is opened for this device + * @close_device: Opposite of open_device * @read: Perform read(2) on device file descriptor * @write: Perform write(2) on device file descriptor * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_* @@ -43,8 +58,8 @@ struct vfio_device { */ struct vfio_device_ops { char *name; - int (*open)(struct vfio_device *vdev); - void (*release)(struct vfio_device *vdev); + int (*open_device)(struct vfio_device *vdev); + void (*close_device)(struct vfio_device *vdev); ssize_t (*read)(struct vfio_device *vdev, char __user *buf, size_t count, loff_t *ppos); ssize_t (*write)(struct vfio_device *vdev, const char __user *buf, @@ -61,11 +76,14 @@ extern void vfio_iommu_group_put(struct iommu_group *group, struct device *dev); void vfio_init_group_dev(struct vfio_device *device, struct device *dev, const struct vfio_device_ops *ops); +void vfio_uninit_group_dev(struct vfio_device *device); int vfio_register_group_dev(struct vfio_device *device); void vfio_unregister_group_dev(struct vfio_device *device); extern struct vfio_device *vfio_device_get_from_dev(struct device *dev); extern void vfio_device_put(struct vfio_device *device); +int vfio_assign_device_set(struct vfio_device *device, void *set_id); + /* events for the backend driver notify callback */ enum vfio_iommu_notify_type { VFIO_IOMMU_CONTAINER_CLOSE = 0, diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h new file mode 100644 index 000000000000..ef9a44b6cf5d --- /dev/null +++ b/include/linux/vfio_pci_core.h @@ -0,0 +1,239 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Red Hat, Inc. All rights reserved. + * Author: Alex Williamson <alex.williamson@redhat.com> + * + * Derived from original vfio: + * Copyright 2010 Cisco Systems, Inc. All rights reserved. 
+ * Author: Tom Lyon, pugs@cisco.com + */ + +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/vfio.h> +#include <linux/irqbypass.h> +#include <linux/types.h> +#include <linux/uuid.h> +#include <linux/notifier.h> + +#ifndef VFIO_PCI_CORE_H +#define VFIO_PCI_CORE_H + +#define VFIO_PCI_OFFSET_SHIFT 40 + +#define VFIO_PCI_OFFSET_TO_INDEX(off) (off >> VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_INDEX_TO_OFFSET(index) ((u64)(index) << VFIO_PCI_OFFSET_SHIFT) +#define VFIO_PCI_OFFSET_MASK (((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1) + +/* Special capability IDs predefined access */ +#define PCI_CAP_ID_INVALID 0xFF /* default raw access */ +#define PCI_CAP_ID_INVALID_VIRT 0xFE /* default virt access */ + +/* Cap maximum number of ioeventfds per device (arbitrary) */ +#define VFIO_PCI_IOEVENTFD_MAX 1000 + +struct vfio_pci_ioeventfd { + struct list_head next; + struct vfio_pci_core_device *vdev; + struct virqfd *virqfd; + void __iomem *addr; + uint64_t data; + loff_t pos; + int bar; + int count; + bool test_mem; +}; + +struct vfio_pci_irq_ctx { + struct eventfd_ctx *trigger; + struct virqfd *unmask; + struct virqfd *mask; + char *name; + bool masked; + struct irq_bypass_producer producer; +}; + +struct vfio_pci_core_device; +struct vfio_pci_region; + +struct vfio_pci_regops { + ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + void (*release)(struct vfio_pci_core_device *vdev, + struct vfio_pci_region *region); + int (*mmap)(struct vfio_pci_core_device *vdev, + struct vfio_pci_region *region, + struct vm_area_struct *vma); + int (*add_capability)(struct vfio_pci_core_device *vdev, + struct vfio_pci_region *region, + struct vfio_info_cap *caps); +}; + +struct vfio_pci_region { + u32 type; + u32 subtype; + const struct vfio_pci_regops *ops; + void *data; + size_t size; + u32 flags; +}; + +struct vfio_pci_dummy_resource { + struct resource resource; + int index; + struct list_head res_next; +}; + +struct vfio_pci_vf_token { + struct mutex lock; + uuid_t uuid; + int users; +}; + +struct vfio_pci_mmap_vma { + struct vm_area_struct *vma; + struct list_head vma_next; +}; + +struct vfio_pci_core_device { + struct vfio_device vdev; + struct pci_dev *pdev; + void __iomem *barmap[PCI_STD_NUM_BARS]; + bool bar_mmap_supported[PCI_STD_NUM_BARS]; + u8 *pci_config_map; + u8 *vconfig; + struct perm_bits *msi_perm; + spinlock_t irqlock; + struct mutex igate; + struct vfio_pci_irq_ctx *ctx; + int num_ctx; + int irq_type; + int num_regions; + struct vfio_pci_region *region; + u8 msi_qmax; + u8 msix_bar; + u16 msix_size; + u32 msix_offset; + u32 rbar[7]; + bool pci_2_3; + bool virq_disabled; + bool reset_works; + bool extended_caps; + bool bardirty; + bool has_vga; + bool needs_reset; + bool nointx; + bool needs_pm_restore; + struct pci_saved_state *pci_saved_state; + struct pci_saved_state *pm_save; + int ioeventfds_nr; + struct eventfd_ctx *err_trigger; + struct eventfd_ctx *req_trigger; + struct list_head dummy_resources_list; + struct mutex ioeventfds_lock; + struct list_head ioeventfds_list; + struct vfio_pci_vf_token *vf_token; + struct notifier_block nb; + struct mutex vma_lock; + struct list_head vma_list; + struct rw_semaphore memory_lock; +}; + +#define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) +#define is_msi(vdev) (vdev->irq_type == VFIO_PCI_MSI_IRQ_INDEX) +#define is_msix(vdev) (vdev->irq_type == VFIO_PCI_MSIX_IRQ_INDEX) +#define is_irq_none(vdev) (!(is_intx(vdev) || is_msi(vdev) || is_msix(vdev))) +#define 
irq_is(vdev, type) (vdev->irq_type == type) + +extern void vfio_pci_intx_mask(struct vfio_pci_core_device *vdev); +extern void vfio_pci_intx_unmask(struct vfio_pci_core_device *vdev); + +extern int vfio_pci_set_irqs_ioctl(struct vfio_pci_core_device *vdev, + uint32_t flags, unsigned index, + unsigned start, unsigned count, void *data); + +extern ssize_t vfio_pci_config_rw(struct vfio_pci_core_device *vdev, + char __user *buf, size_t count, + loff_t *ppos, bool iswrite); + +extern ssize_t vfio_pci_bar_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +extern ssize_t vfio_pci_vga_rw(struct vfio_pci_core_device *vdev, char __user *buf, + size_t count, loff_t *ppos, bool iswrite); + +extern long vfio_pci_ioeventfd(struct vfio_pci_core_device *vdev, loff_t offset, + uint64_t data, int count, int fd); + +extern int vfio_pci_init_perm_bits(void); +extern void vfio_pci_uninit_perm_bits(void); + +extern int vfio_config_init(struct vfio_pci_core_device *vdev); +extern void vfio_config_free(struct vfio_pci_core_device *vdev); + +extern int vfio_pci_register_dev_region(struct vfio_pci_core_device *vdev, + unsigned int type, unsigned int subtype, + const struct vfio_pci_regops *ops, + size_t size, u32 flags, void *data); + +extern int vfio_pci_set_power_state(struct vfio_pci_core_device *vdev, + pci_power_t state); + +extern bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev); +extern void vfio_pci_zap_and_down_write_memory_lock(struct vfio_pci_core_device + *vdev); +extern u16 vfio_pci_memory_lock_and_enable(struct vfio_pci_core_device *vdev); +extern void vfio_pci_memory_unlock_and_restore(struct vfio_pci_core_device *vdev, + u16 cmd); + +#ifdef CONFIG_VFIO_PCI_IGD +extern int vfio_pci_igd_init(struct vfio_pci_core_device *vdev); +#else +static inline int vfio_pci_igd_init(struct vfio_pci_core_device *vdev) +{ + return -ENODEV; +} +#endif + +#ifdef CONFIG_S390 +extern int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, + struct vfio_info_cap *caps); +#else +static inline int vfio_pci_info_zdev_add_caps(struct vfio_pci_core_device *vdev, + struct vfio_info_cap *caps) +{ + return -ENODEV; +} +#endif + +/* Will be exported for vfio pci drivers usage */ +void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga, + bool is_disable_idle_d3); +void vfio_pci_core_close_device(struct vfio_device *core_vdev); +void vfio_pci_core_init_device(struct vfio_pci_core_device *vdev, + struct pci_dev *pdev, + const struct vfio_device_ops *vfio_pci_ops); +int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev); +void vfio_pci_core_uninit_device(struct vfio_pci_core_device *vdev); +void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev); +int vfio_pci_core_sriov_configure(struct pci_dev *pdev, int nr_virtfn); +extern const struct pci_error_handlers vfio_pci_core_err_handlers; +long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd, + unsigned long arg); +ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf, + size_t count, loff_t *ppos); +ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf, + size_t count, loff_t *ppos); +int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma); +void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count); +int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf); +int vfio_pci_core_enable(struct vfio_pci_core_device *vdev); +void 
vfio_pci_core_disable(struct vfio_pci_core_device *vdev); +void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev); + +static inline bool vfio_pci_is_vga(struct pci_dev *pdev) +{ + return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; +} + +#endif /* VFIO_PCI_CORE_H */ diff --git a/include/linux/vhost_iotlb.h b/include/linux/vhost_iotlb.h index 6b09b786a762..2d0e2f52f938 100644 --- a/include/linux/vhost_iotlb.h +++ b/include/linux/vhost_iotlb.h @@ -17,6 +17,7 @@ struct vhost_iotlb_map { u32 perm; u32 flags_padding; u64 __subtree_last; + void *opaque; }; #define VHOST_IOTLB_FLAG_RETIRE 0x1 @@ -29,6 +30,8 @@ struct vhost_iotlb { unsigned int flags; }; +int vhost_iotlb_add_range_ctx(struct vhost_iotlb *iotlb, u64 start, u64 last, + u64 addr, unsigned int perm, void *opaque); int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, u64 start, u64 last, u64 addr, unsigned int perm); void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index b1894e0323fa..41edbc01ffa4 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -110,6 +110,7 @@ struct virtio_device { bool config_enabled; bool config_change_pending; spinlock_t config_lock; + spinlock_t vqs_list_lock; /* Protects VQs list access */ struct device dev; struct virtio_device_id id; const struct virtio_config_ops *config; diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ae0dd1948c2b..a185cc75ff52 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -33,6 +33,8 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGREUSE, PGSTEAL_KSWAPD, PGSTEAL_DIRECT, + PGDEMOTE_KSWAPD, + PGDEMOTE_DIRECT, PGSCAN_KSWAPD, PGSCAN_DIRECT, PGSCAN_DIRECT_THROTTLE, diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 2644425b6dce..671d402c3778 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h @@ -225,9 +225,6 @@ static inline bool is_vm_area_hugepages(const void *addr) } #ifdef CONFIG_MMU -int vmap_range(unsigned long addr, unsigned long end, - phys_addr_t phys_addr, pgprot_t prot, - unsigned int max_page_shift); void vunmap_range(unsigned long addr, unsigned long end); static inline void set_vm_flush_reset_perms(void *addr) { diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h index 6d28bc433c1c..6a2f51ebbfd3 100644 --- a/include/linux/vmpressure.h +++ b/include/linux/vmpressure.h @@ -37,7 +37,7 @@ extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio); extern void vmpressure_init(struct vmpressure *vmpr); extern void vmpressure_cleanup(struct vmpressure *vmpr); extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg); -extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr); +extern struct mem_cgroup *vmpressure_to_memcg(struct vmpressure *vmpr); extern int vmpressure_register_event(struct mem_cgroup *memcg, struct eventfd_ctx *eventfd, const char *args); diff --git a/include/linux/vringh.h b/include/linux/vringh.h index 84db7b8f912f..212892cf9822 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -14,6 +14,7 @@ #include <linux/virtio_byteorder.h> #include <linux/uio.h> #include <linux/slab.h> +#include <linux/spinlock.h> #if IS_REACHABLE(CONFIG_VHOST_IOTLB) #include <linux/dma-direction.h> #include <linux/vhost_iotlb.h> diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h index 0da94a6dee15..b5ab452fca5b 100644 --- a/include/linux/vt_kern.h +++ 
b/include/linux/vt_kern.h @@ -148,26 +148,26 @@ void hide_boot_cursor(bool hide); /* keyboard provided interfaces */ int vt_do_diacrit(unsigned int cmd, void __user *up, int eperm); -int vt_do_kdskbmode(int console, unsigned int arg); -int vt_do_kdskbmeta(int console, unsigned int arg); +int vt_do_kdskbmode(unsigned int console, unsigned int arg); +int vt_do_kdskbmeta(unsigned int console, unsigned int arg); int vt_do_kbkeycode_ioctl(int cmd, struct kbkeycode __user *user_kbkc, int perm); int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, - int console); + unsigned int console); int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm); -int vt_do_kdskled(int console, int cmd, unsigned long arg, int perm); -int vt_do_kdgkbmode(int console); -int vt_do_kdgkbmeta(int console); -void vt_reset_unicode(int console); +int vt_do_kdskled(unsigned int console, int cmd, unsigned long arg, int perm); +int vt_do_kdgkbmode(unsigned int console); +int vt_do_kdgkbmeta(unsigned int console); +void vt_reset_unicode(unsigned int console); int vt_get_shift_state(void); -void vt_reset_keyboard(int console); -int vt_get_leds(int console, int flag); -int vt_get_kbd_mode_bit(int console, int bit); -void vt_set_kbd_mode_bit(int console, int bit); -void vt_clr_kbd_mode_bit(int console, int bit); -void vt_set_led_state(int console, int leds); -void vt_kbd_con_start(int console); -void vt_kbd_con_stop(int console); +void vt_reset_keyboard(unsigned int console); +int vt_get_leds(unsigned int console, int flag); +int vt_get_kbd_mode_bit(unsigned int console, int bit); +void vt_set_kbd_mode_bit(unsigned int console, int bit); +void vt_clr_kbd_mode_bit(unsigned int console, int bit); +void vt_set_led_state(unsigned int console, int leds); +void vt_kbd_con_start(unsigned int console); +void vt_kbd_con_stop(unsigned int console); void vc_scrolldelta_helper(struct vc_data *c, int lines, unsigned int rolled_over, void *_base, unsigned int size); diff --git a/include/linux/wait.h b/include/linux/wait.h index 6598ae35e1b5..93dab0e9580f 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -56,7 +56,7 @@ struct task_struct; #define __WAIT_QUEUE_HEAD_INITIALIZER(name) { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ - .head = { &(name).head, &(name).head } } + .head = LIST_HEAD_INIT(name.head) } #define DECLARE_WAIT_QUEUE_HEAD(name) \ struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name) diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 9b19e6bb68b5..99660197a36c 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -107,6 +107,7 @@ struct watchdog_device { unsigned int max_hw_heartbeat_ms; struct notifier_block reboot_nb; struct notifier_block restart_nb; + struct notifier_block pm_nb; void *driver_data; struct watchdog_core_data *wd_data; unsigned long status; @@ -116,6 +117,7 @@ struct watchdog_device { #define WDOG_STOP_ON_REBOOT 2 /* Should be stopped on reboot */ #define WDOG_HW_RUNNING 3 /* True if HW watchdog running */ #define WDOG_STOP_ON_UNREGISTER 4 /* Should be stopped on unregister */ +#define WDOG_NO_PING_ON_SUSPEND 5 /* Ping worker should be stopped on suspend */ struct list_head deferred; }; @@ -156,6 +158,12 @@ static inline void watchdog_stop_on_unregister(struct watchdog_device *wdd) set_bit(WDOG_STOP_ON_UNREGISTER, &wdd->status); } +/* Use the following function to stop the wdog ping worker when suspending */ +static inline void watchdog_stop_ping_on_suspend(struct watchdog_device *wdd) +{ + 
set_bit(WDOG_NO_PING_ON_SUSPEND, &wdd->status); +} + /* Use the following function to check if a timeout value is invalid */ static inline bool watchdog_timeout_invalid(struct watchdog_device *wdd, unsigned int t) { @@ -209,6 +217,8 @@ extern int watchdog_init_timeout(struct watchdog_device *wdd, unsigned int timeout_parm, struct device *dev); extern int watchdog_register_device(struct watchdog_device *); extern void watchdog_unregister_device(struct watchdog_device *); +int watchdog_dev_suspend(struct watchdog_device *wdd); +int watchdog_dev_resume(struct watchdog_device *wdd); int watchdog_set_last_hw_keepalive(struct watchdog_device *, unsigned int); diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index d15a7730ee18..2ebef6b1a3d6 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -29,7 +29,7 @@ void delayed_work_timer_fn(struct timer_list *t); enum { WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ - WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ + WORK_STRUCT_INACTIVE_BIT= 1, /* work item is inactive */ WORK_STRUCT_PWQ_BIT = 2, /* data points to pwq */ WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ #ifdef CONFIG_DEBUG_OBJECTS_WORK @@ -42,7 +42,7 @@ enum { WORK_STRUCT_COLOR_BITS = 4, WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, - WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, + WORK_STRUCT_INACTIVE = 1 << WORK_STRUCT_INACTIVE_BIT, WORK_STRUCT_PWQ = 1 << WORK_STRUCT_PWQ_BIT, WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, #ifdef CONFIG_DEBUG_OBJECTS_WORK @@ -51,19 +51,14 @@ enum { WORK_STRUCT_STATIC = 0, #endif - /* - * The last color is no color used for works which don't - * participate in workqueue flushing. - */ - WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS) - 1, - WORK_NO_COLOR = WORK_NR_COLORS, + WORK_NR_COLORS = (1 << WORK_STRUCT_COLOR_BITS), /* not bound to any CPU, prefer the local CPU */ WORK_CPU_UNBOUND = NR_CPUS, /* * Reserve 8 bits off of pwq pointer w/ debugobjects turned off. - * This makes pwqs aligned to 256 bytes and allows 15 workqueue + * This makes pwqs aligned to 256 bytes and allows 16 workqueue * flush colors. */ WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + @@ -324,7 +319,7 @@ enum { * to execute and tries to keep idle cores idle to conserve power; * however, for example, a per-cpu work item scheduled from an * interrupt handler on an idle CPU will force the scheduler to - * excute the work item on that CPU breaking the idleness, which in + * execute the work item on that CPU breaking the idleness, which in * turn may lead to more scheduling choices which are sub-optimal * in terms of power consumption. 
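/*
 * Editor's sketch (not part of the patch): a hypothetical driver using the
 * watchdog_stop_ping_on_suspend() helper added in the watchdog.h hunk above,
 * for hardware whose ping worker must not run while the system is suspended.
 * example_wdt_info, example_wdt_ops and example_wdt_probe are invented names.
 */
static int example_wdt_probe(struct platform_device *pdev)
{
	struct watchdog_device *wdd;

	wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
	if (!wdd)
		return -ENOMEM;

	wdd->info = &example_wdt_info;
	wdd->ops = &example_wdt_ops;

	/* Sets WDOG_NO_PING_ON_SUSPEND so the core pauses the ping worker. */
	watchdog_stop_ping_on_suspend(wdd);

	return devm_watchdog_register_device(&pdev->dev, wdd);
}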
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 667e86cfbdcf..d1f65adf6a26 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -218,7 +218,7 @@ void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
 void wbc_detach_inode(struct writeback_control *wbc);
 void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
 			      size_t bytes);
-int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr_pages,
+int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
 			   enum wb_reason reason, struct wb_completion *done);
 void cgroup_writeback_umount(void);
 bool cleanup_offline_cgwb(struct bdi_writeback *wb);
@@ -336,14 +336,9 @@ static inline void cgroup_writeback_umount(void)
 /*
  * mm/page-writeback.c
  */
-#ifdef CONFIG_BLOCK
 void laptop_io_completion(struct backing_dev_info *info);
 void laptop_sync_completion(void);
-void laptop_mode_sync(struct work_struct *work);
 void laptop_mode_timer_fn(struct timer_list *t);
-#else
-static inline void laptop_sync_completion(void) { }
-#endif
 bool node_dirty_ok(struct pglist_data *pgdat);
 int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
 #ifdef CONFIG_CGROUP_WRITEBACK
@@ -379,7 +374,7 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
 
-void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
+void wb_update_bandwidth(struct bdi_writeback *wb);
 void balance_dirty_pages_ratelimited(struct address_space *mapping);
 bool wb_over_bg_thresh(struct bdi_writeback *wb);
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index b77f39f319ad..29db736af86d 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -18,6 +18,24 @@
 #define __LINUX_WW_MUTEX_H
 
 #include <linux/mutex.h>
+#include <linux/rtmutex.h>
+
+#if defined(CONFIG_DEBUG_MUTEXES) || \
+	(defined(CONFIG_PREEMPT_RT) && defined(CONFIG_DEBUG_RT_MUTEXES))
+#define DEBUG_WW_MUTEXES
+#endif
+
+#ifndef CONFIG_PREEMPT_RT
+#define WW_MUTEX_BASE			mutex
+#define ww_mutex_base_init(l,n,k)	__mutex_init(l,n,k)
+#define ww_mutex_base_trylock(l)	mutex_trylock(l)
+#define ww_mutex_base_is_locked(b)	mutex_is_locked((b))
+#else
+#define WW_MUTEX_BASE			rt_mutex
+#define ww_mutex_base_init(l,n,k)	__rt_mutex_init(l,n,k)
+#define ww_mutex_base_trylock(l)	rt_mutex_trylock(l)
+#define ww_mutex_base_is_locked(b)	rt_mutex_base_is_locked(&(b)->rtmutex)
+#endif
 
 struct ww_class {
 	atomic_long_t stamp;
@@ -28,16 +46,24 @@ struct ww_class {
 	unsigned int is_wait_die;
 };
 
+struct ww_mutex {
+	struct WW_MUTEX_BASE base;
+	struct ww_acquire_ctx *ctx;
+#ifdef DEBUG_WW_MUTEXES
+	struct ww_class *ww_class;
+#endif
+};
+
 struct ww_acquire_ctx {
 	struct task_struct *task;
 	unsigned long stamp;
 	unsigned int acquired;
 	unsigned short wounded;
 	unsigned short is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
 	unsigned int done_acquire;
 	struct ww_class *ww_class;
-	struct ww_mutex *contending_lock;
+	void *contending_lock;
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
@@ -74,9 +100,9 @@ struct ww_acquire_ctx {
 static inline void ww_mutex_init(struct ww_mutex *lock,
 				 struct ww_class *ww_class)
 {
-	__mutex_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
+	ww_mutex_base_init(&lock->base, ww_class->mutex_name, &ww_class->mutex_key);
 	lock->ctx = NULL;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
 	lock->ww_class = ww_class;
 #endif
 }
@@ -113,7 +139,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
 	ctx->acquired = 0;
 	ctx->wounded = false;
 	ctx->is_wait_die = ww_class->is_wait_die;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
 	ctx->ww_class = ww_class;
 	ctx->done_acquire = 0;
 	ctx->contending_lock = NULL;
@@ -143,7 +169,7 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
  */
 static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
 	lockdep_assert_held(ctx);
 
 	DEBUG_LOCKS_WARN_ON(ctx->done_acquire);
@@ -163,7 +189,7 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	mutex_release(&ctx->dep_map, _THIS_IP_);
 #endif
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
 	DEBUG_LOCKS_WARN_ON(ctx->acquired);
 	if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
 		/*
@@ -269,7 +295,7 @@ static inline void
 ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
 	int ret;
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
 	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
 #endif
 	ret = ww_mutex_lock(lock, ctx);
@@ -305,7 +331,7 @@ static inline int __must_check
 ww_mutex_lock_slow_interruptible(struct ww_mutex *lock,
 				 struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
 	DEBUG_LOCKS_WARN_ON(!ctx->contending_lock);
 #endif
 	return ww_mutex_lock_interruptible(lock, ctx);
@@ -322,7 +348,7 @@ extern void ww_mutex_unlock(struct ww_mutex *lock);
  */
 static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
 {
-	return mutex_trylock(&lock->base);
+	return ww_mutex_base_trylock(&lock->base);
 }
 
 /***
@@ -335,7 +361,9 @@ static inline int __must_check ww_mutex_trylock(struct ww_mutex *lock)
 */
 static inline void ww_mutex_destroy(struct ww_mutex *lock)
 {
+#ifndef CONFIG_PREEMPT_RT
 	mutex_destroy(&lock->base);
+#endif
 }
 
 /**
@@ -346,7 +374,7 @@ static inline void ww_mutex_destroy(struct ww_mutex *lock)
 */
 static inline bool ww_mutex_is_locked(struct ww_mutex *lock)
 {
-	return mutex_is_locked(&lock->base);
+	return ww_mutex_base_is_locked(&lock->base);
 }
 
 #endif
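Since this diff only swaps the lock that backs struct ww_mutex (mutex vs. rt_mutex), the acquire/backoff protocol callers follow is unchanged. The sketch below is illustrative and not part of the diff: demo_class and lock_both() are made-up names; the loop is the usual wait-die/wound-wait backoff pattern using only API declared in ww_mutex.h.

#include <linux/kernel.h>
#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(demo_class);

/* Take two ww_mutexes in one acquire context, backing off on -EDEADLK. */
static void lock_both(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *first = a, *second = b;

	ww_acquire_init(&ctx, &demo_class);

	if (ww_mutex_lock(first, &ctx))		/* first lock in a ctx never deadlocks */
		goto out;
	while (ww_mutex_lock(second, &ctx) == -EDEADLK) {
		/* back off: drop what we hold, sleep on the contended lock, retry */
		ww_mutex_unlock(first);
		ww_mutex_lock_slow(second, &ctx);
		swap(first, second);
	}
	ww_acquire_done(&ctx);

	/* ... both locks held; identical code on mutex and rt_mutex bases ... */

	ww_mutex_unlock(a);
	ww_mutex_unlock(b);
out:
	ww_acquire_fini(&ctx);
}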
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index e2e4de188d84..db7416ed6057 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -29,7 +29,6 @@
 struct zorro_dev {
 	struct ExpansionRom rom;
 	zorro_id id;
-	struct zorro_driver *driver;	/* which driver has allocated this device */
 	struct device dev;		/* Generic device interface */
 	u16 slotaddr;
 	u16 slotsize;