author	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-11 04:07:44 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-08-11 04:07:44 +0200
commit	97d052ea3fa853b9aabcc4baca1a605cb1188611 (patch)
tree	48901cbccafdc5870c7dad9cce98e9338065f8a3 /include
parent	Merge tag 'f2fs-for-5.9-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent	locking/seqlock, headers: Untangle the spaghetti monster (diff)
Merge tag 'locking-urgent-2020-08-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Thomas Gleixner:
 "A set of locking fixes and updates:

  - Untangle the header spaghetti which causes build failures in
    various situations, triggered by the lockdep additions to seqcount
    to validate that the write side critical sections are
    non-preemptible.

  - The seqcount associated-lock debug add-ons which were blocked by
    the above fallout.

    seqcount writers, contrary to seqlock writers, must be externally
    serialized, which usually happens via locking - except for strict
    per-CPU seqcounts. As the lock is not part of the seqcount, lockdep
    cannot validate that the lock is held.

    This new debug mechanism adds the concept of associated locks: the
    sequence count now has lock-type variants and corresponding
    initializers which take a pointer to the associated lock used for
    writer serialization. If lockdep is enabled, the pointer is stored
    and write_seqcount_begin() has a lockdep assertion to validate that
    the lock is held.

    Aside from the type and the initializer, no other code changes are
    required at the seqcount usage sites. The rest of the seqcount API
    is unchanged and determines the type at compile time with the help
    of _Generic, which is possible now that the minimal GCC version has
    been moved up.

    Adding this lockdep coverage unearthed a handful of seqcount bugs
    which have already been addressed independently of this.

    While generally useful, this comes with a Trojan-horse twist: on RT
    kernels the write side critical section can become preemptible if
    the writers are serialized by an associated lock, which leads to
    the well-known reader-preempts-writer livelock. RT prevents this by
    storing the associated lock pointer, independently of lockdep, in
    the seqcount and changing the reader side to block on the lock when
    a reader detects that a writer is in the write side critical
    section.

  - Conversion of seqcount usage sites to associated types and
    initializers"

* tag 'locking-urgent-2020-08-10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
  locking/seqlock, headers: Untangle the spaghetti monster
  locking, arch/ia64: Reduce <asm/smp.h> header dependencies by moving XTP bits into the new <asm/xtp.h> header
  x86/headers: Remove APIC headers from <asm/smp.h>
  seqcount: More consistent seqprop names
  seqcount: Compress SEQCNT_LOCKNAME_ZERO()
  seqlock: Fold seqcount_LOCKNAME_init() definition
  seqlock: Fold seqcount_LOCKNAME_t definition
  seqlock: s/__SEQ_LOCKDEP/__SEQ_LOCK/g
  hrtimer: Use sequence counter with associated raw spinlock
  kvm/eventfd: Use sequence counter with associated spinlock
  userfaultfd: Use sequence counter with associated spinlock
  NFSv4: Use sequence counter with associated spinlock
  iocost: Use sequence counter with associated spinlock
  raid5: Use sequence counter with associated spinlock
  vfs: Use sequence counter with associated spinlock
  timekeeping: Use sequence counter with associated raw spinlock
  xfrm: policy: Use sequence counters with associated lock
  netfilter: nft_set_rbtree: Use sequence counter with associated rwlock
  netfilter: conntrack: Use sequence counter with associated spinlock
  sched: tasks: Use sequence counter with associated spinlock
  ...
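
To make the new API concrete before the diff: a minimal usage sketch of a
seqcount with an associated spinlock (illustrative only; struct my_data,
my_data_init() and friends are hypothetical names, not part of this merge):

struct my_data {
	spinlock_t		lock;	/* serializes all writers */
	seqcount_spinlock_t	seq;	/* associated with ->lock */
	u64			a, b;
};

static void my_data_init(struct my_data *d)
{
	spin_lock_init(&d->lock);
	/* The init variant takes a pointer to the associated lock. */
	seqcount_spinlock_init(&d->seq, &d->lock);
}

static void my_data_write(struct my_data *d, u64 a, u64 b)
{
	spin_lock(&d->lock);
	/* With lockdep enabled, this asserts that ->lock is held. */
	write_seqcount_begin(&d->seq);
	d->a = a;
	d->b = b;
	write_seqcount_end(&d->seq);
	spin_unlock(&d->lock);
}

static u64 my_data_read(struct my_data *d)
{
	unsigned int seq;
	u64 sum;

	do {
		seq = read_seqcount_begin(&d->seq);
		sum = d->a + d->b;
	} while (read_seqcount_retry(&d->seq, seq));

	return sum;
}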
Diffstat (limited to 'include')
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/dma-resv.h4
-rw-r--r--include/linux/dynamic_queue_limits.h2
-rw-r--r--include/linux/fs_struct.h2
-rw-r--r--include/linux/hrtimer.h3
-rw-r--r--include/linux/ktime.h1
-rw-r--r--include/linux/kvm_irqfd.h2
-rw-r--r--include/linux/lockdep.h1
-rw-r--r--include/linux/mutex.h11
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/seqlock.h368
-rw-r--r--include/linux/time.h1
-rw-r--r--include/linux/videodev2.h1
-rw-r--r--include/linux/ww_mutex.h8
-rw-r--r--include/net/netfilter/nf_conntrack.h2
15 files changed, 310 insertions, 101 deletions
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index a81f0c3cf352..65d975bf9390 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -89,7 +89,7 @@ extern struct dentry_stat_t dentry_stat;
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
- seqcount_t d_seq; /* per dentry seqlock */
+ seqcount_spinlock_t d_seq; /* per dentry seqlock */
struct hlist_bl_node d_hash; /* lookup hash list */
struct dentry *d_parent; /* parent directory */
struct qstr d_name;
diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h
index ee50d10f052b..d44a77e8a7e3 100644
--- a/include/linux/dma-resv.h
+++ b/include/linux/dma-resv.h
@@ -46,8 +46,6 @@
#include <linux/rcupdate.h>
extern struct ww_class reservation_ww_class;
-extern struct lock_class_key reservation_seqcount_class;
-extern const char reservation_seqcount_string[];
/**
* struct dma_resv_list - a list of shared fences
@@ -71,7 +69,7 @@ struct dma_resv_list {
*/
struct dma_resv {
struct ww_mutex lock;
- seqcount_t seq;
+ seqcount_ww_mutex_t seq;
struct dma_fence __rcu *fence_excl;
struct dma_resv_list __rcu *fence;
diff --git a/include/linux/dynamic_queue_limits.h b/include/linux/dynamic_queue_limits.h
index 99fc06f0afc1..407c2f281b64 100644
--- a/include/linux/dynamic_queue_limits.h
+++ b/include/linux/dynamic_queue_limits.h
@@ -38,6 +38,8 @@
#ifdef __KERNEL__
+#include <asm/bug.h>
+
struct dql {
/* Fields accessed in enqueue path (dql_queued) */
unsigned int num_queued; /* Total ever queued */
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
index cf1015abfbf2..783b48dedb72 100644
--- a/include/linux/fs_struct.h
+++ b/include/linux/fs_struct.h
@@ -9,7 +9,7 @@
struct fs_struct {
int users;
spinlock_t lock;
- seqcount_t seq;
+ seqcount_spinlock_t seq;
int umask;
int in_exec;
struct path root, pwd;
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 15c8ac313678..107cedd7019a 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/percpu.h>
+#include <linux/seqlock.h>
#include <linux/timer.h>
#include <linux/timerqueue.h>
@@ -159,7 +160,7 @@ struct hrtimer_clock_base {
struct hrtimer_cpu_base *cpu_base;
unsigned int index;
clockid_t clockid;
- seqcount_t seq;
+ seqcount_raw_spinlock_t seq;
struct hrtimer *running;
struct timerqueue_head active;
ktime_t (*get_time)(void);
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 42d2e6ac35f2..a12b5523cc18 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -23,6 +23,7 @@
#include <linux/time.h>
#include <linux/jiffies.h>
+#include <asm/bug.h>
/* Nanosecond scalar representation for kernel time values */
typedef s64 ktime_t;
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index dc1da020305b..dac047abdba7 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -42,7 +42,7 @@ struct kvm_kernel_irqfd {
wait_queue_entry_t wait;
/* Update side is protected by irqfds.lock */
struct kvm_kernel_irq_routing_entry irq_entry;
- seqcount_t irq_entry_sc;
+ seqcount_spinlock_t irq_entry_sc;
/* Used for level IRQ fast-path */
int gsi;
struct work_struct inject;
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 39a35699d0d6..62a382d1845b 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -11,6 +11,7 @@
#define __LINUX_LOCKDEP_H
#include <linux/lockdep_types.h>
+#include <linux/smp.h>
#include <asm/percpu.h>
struct task_struct;
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index ae197cc00cc8..dcd185cbfe79 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -65,6 +65,17 @@ struct mutex {
#endif
};
+struct ww_class;
+struct ww_acquire_ctx;
+
+struct ww_mutex {
+ struct mutex base;
+ struct ww_acquire_ctx *ctx;
+#ifdef CONFIG_DEBUG_MUTEXES
+ struct ww_class *ww_class;
+#endif
+};
+
/*
* This is the control structure for tasks blocked on mutex,
* which resides on the blocked task's kernel stack:
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 52bcc9f48e17..53ddc02e2e79 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -32,6 +32,7 @@
#include <linux/task_io_accounting.h>
#include <linux/posix-timers.h>
#include <linux/rseq.h>
+#include <linux/seqlock.h>
#include <linux/kcsan.h>
/* task_struct member predeclarations (sorted alphabetically): */
@@ -1049,7 +1050,7 @@ struct task_struct {
/* Protected by ->alloc_lock: */
nodemask_t mems_allowed;
/* Sequence number to catch updates: */
- seqcount_t mems_allowed_seq;
+ seqcount_spinlock_t mems_allowed_seq;
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 54bc20496392..962d9768945f 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -10,13 +10,16 @@
*
* Copyrights:
* - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
+ * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
*/
-#include <linux/spinlock.h>
-#include <linux/preempt.h>
-#include <linux/lockdep.h>
#include <linux/compiler.h>
#include <linux/kcsan-checks.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+
#include <asm/processor.h>
/*
@@ -48,6 +51,10 @@
* This mechanism can't be used if the protected data contains pointers,
* as the writer can invalidate a pointer that a reader is following.
*
+ * If the write serialization mechanism is one of the common kernel
+ * locking primitives, use a sequence counter with associated lock
+ * (seqcount_LOCKTYPE_t) instead.
+ *
* If it's desired to automatically handle the sequence counter writer
* serialization and non-preemptibility requirements, use a sequential
* lock (seqlock_t) instead.
@@ -72,17 +79,18 @@ static inline void __seqcount_init(seqcount_t *s, const char *name,
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SEQCOUNT_DEP_MAP_INIT(lockname) \
- .dep_map = { .name = #lockname } \
+
+# define SEQCOUNT_DEP_MAP_INIT(lockname) \
+ .dep_map = { .name = #lockname }
/**
* seqcount_init() - runtime initializer for seqcount_t
* @s: Pointer to the seqcount_t instance
*/
-# define seqcount_init(s) \
- do { \
- static struct lock_class_key __key; \
- __seqcount_init((s), #s, &__key); \
+# define seqcount_init(s) \
+ do { \
+ static struct lock_class_key __key; \
+ __seqcount_init((s), #s, &__key); \
} while (0)
static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
@@ -108,9 +116,143 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*/
#define SEQCNT_ZERO(name) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(name) }
+/*
+ * Sequence counters with associated locks (seqcount_LOCKTYPE_t)
+ *
+ * A sequence counter which associates the lock used for writer
+ * serialization at initialization time. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ *
+ * For associated locks which do not implicitly disable preemption,
+ * preemption protection is enforced in the write side function.
+ *
+ * Lockdep is never used in any of the raw write variants.
+ *
+ * See Documentation/locking/seqlock.rst
+ */
+
+#ifdef CONFIG_LOCKDEP
+#define __SEQ_LOCK(expr) expr
+#else
+#define __SEQ_LOCK(expr)
+#endif
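+
+(Editorial note: the effect of __SEQ_LOCK() is that on !CONFIG_LOCKDEP builds
+every associated-lock expression compiles away, so a seqcount_LOCKNAME_t is
+then layout-identical to a plain seqcount_t. Roughly, for the spinlock
+variant defined below - a sketch of the two expansions, not literal
+preprocessor output:
+
+	/* CONFIG_LOCKDEP=y */
+	typedef struct seqcount_spinlock {
+		seqcount_t	seqcount;
+		spinlock_t	*lock;	/* enables lockdep_assert_held() */
+	} seqcount_spinlock_t;
+
+	/* CONFIG_LOCKDEP=n */
+	typedef struct seqcount_spinlock {
+		seqcount_t	seqcount;	/* lock pointer compiled out */
+	} seqcount_spinlock_t;
+)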
+
+/**
+ * typedef seqcount_LOCKNAME_t - sequence counter with LOCKTYPE associated
+ * @seqcount: The real sequence counter
+ * @lock: Pointer to the associated spinlock
+ *
+ * A plain sequence counter with external writer synchronization by a
+ * spinlock. The spinlock is associated with the sequence count in the
+ * static initializer or init function. This enables lockdep to validate
+ * that the write side critical section is properly serialized.
+ */
+
+/**
+ * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
+ * @s: Pointer to the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKTYPE
+ */
+
+/*
+ * SEQCOUNT_LOCKTYPE() - Instantiate seqcount_LOCKNAME_t and helpers
+ * @locktype: actual typename
+ * @lockname: name
+ * @preemptible: preemptibility of above locktype
+ * @lockmember: argument for lockdep_assert_held()
+ */
+#define SEQCOUNT_LOCKTYPE(locktype, lockname, preemptible, lockmember) \
+typedef struct seqcount_##lockname { \
+ seqcount_t seqcount; \
+ __SEQ_LOCK(locktype *lock); \
+} seqcount_##lockname##_t; \
+ \
+static __always_inline void \
+seqcount_##lockname##_init(seqcount_##lockname##_t *s, locktype *lock) \
+{ \
+ seqcount_init(&s->seqcount); \
+ __SEQ_LOCK(s->lock = lock); \
+} \
+ \
+static __always_inline seqcount_t * \
+__seqcount_##lockname##_ptr(seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
+} \
+ \
+static __always_inline bool \
+__seqcount_##lockname##_preemptible(seqcount_##lockname##_t *s) \
+{ \
+ return preemptible; \
+} \
+ \
+static __always_inline void \
+__seqcount_##lockname##_assert(seqcount_##lockname##_t *s) \
+{ \
+ __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+}
+
+/*
+ * __seqprop() for seqcount_t
+ */
+
+static inline seqcount_t *__seqcount_ptr(seqcount_t *s)
+{
+ return s;
+}
+
+static inline bool __seqcount_preemptible(seqcount_t *s)
+{
+ return false;
+}
+
+static inline void __seqcount_assert(seqcount_t *s)
+{
+ lockdep_assert_preemption_disabled();
+}
+
+SEQCOUNT_LOCKTYPE(raw_spinlock_t, raw_spinlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(spinlock_t, spinlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(rwlock_t, rwlock, false, s->lock)
+SEQCOUNT_LOCKTYPE(struct mutex, mutex, true, s->lock)
+SEQCOUNT_LOCKTYPE(struct ww_mutex, ww_mutex, true, &s->lock->base)
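+
+(Editorial note: as an illustration, SEQCOUNT_LOCKTYPE(spinlock_t, spinlock,
+false, s->lock) above expands roughly to the following helpers - a sketch,
+modulo the __SEQ_LOCK() conditionals:
+
+	static __always_inline void
+	seqcount_spinlock_init(seqcount_spinlock_t *s, spinlock_t *lock)
+	{
+		seqcount_init(&s->seqcount);
+		s->lock = lock;			/* only with CONFIG_LOCKDEP */
+	}
+
+	static __always_inline seqcount_t *
+	__seqcount_spinlock_ptr(seqcount_spinlock_t *s)
+	{
+		return &s->seqcount;
+	}
+
+	static __always_inline bool
+	__seqcount_spinlock_preemptible(seqcount_spinlock_t *s)
+	{
+		return false;	/* spinlocks already disable preemption */
+	}
+
+	static __always_inline void
+	__seqcount_spinlock_assert(seqcount_spinlock_t *s)
+	{
+		lockdep_assert_held(s->lock);	/* only with CONFIG_LOCKDEP */
+	}
+)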
+
+/**
+ * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
+ * @name: Name of the seqcount_LOCKNAME_t instance
+ * @lock: Pointer to the associated LOCKTYPE
+ */
+
+#define SEQCOUNT_LOCKTYPE_ZERO(seq_name, assoc_lock) { \
+ .seqcount = SEQCNT_ZERO(seq_name.seqcount), \
+ __SEQ_LOCK(.lock = (assoc_lock)) \
+}
+
+#define SEQCNT_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RAW_SPINLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_RWLOCK_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
+#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKTYPE_ZERO(name, lock)
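+
+(Editorial note: static initialization then looks like this - a hedged
+example with hypothetical names:
+
+	static DEFINE_SPINLOCK(foo_lock);
+	static seqcount_spinlock_t foo_seq =
+		SEQCNT_SPINLOCK_ZERO(foo_seq, &foo_lock);
+)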
+
+
+#define __seqprop_case(s, lockname, prop) \
+ seqcount_##lockname##_t: __seqcount_##lockname##_##prop((void *)(s))
+
+#define __seqprop(s, prop) _Generic(*(s), \
+ seqcount_t: __seqcount_##prop((void *)(s)), \
+ __seqprop_case((s), raw_spinlock, prop), \
+ __seqprop_case((s), spinlock, prop), \
+ __seqprop_case((s), rwlock, prop), \
+ __seqprop_case((s), mutex, prop), \
+ __seqprop_case((s), ww_mutex, prop))
+
+#define __seqcount_ptr(s) __seqprop(s, ptr)
+#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
+#define __seqcount_assert_lock_held(s) __seqprop(s, assert)
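+
+(Editorial note: the _Generic selection above is what lets one API serve all
+the counter types. A self-contained userspace demonstration of the same
+pattern, including the (void *) cast that keeps the non-selected branches
+type-correct - invented demo names, not kernel code:
+
+	#include <stdio.h>
+
+	typedef struct { unsigned sequence; } seqcount_demo_t;
+	typedef struct { seqcount_demo_t seqcount; int *lock; } seqcount_spin_demo_t;
+
+	static seqcount_demo_t *ptr_plain(seqcount_demo_t *s) { return s; }
+	static seqcount_demo_t *ptr_spin(seqcount_spin_demo_t *s) { return &s->seqcount; }
+
+	#define demo_ptr(s) _Generic(*(s),				\
+		seqcount_demo_t:      ptr_plain((void *)(s)),		\
+		seqcount_spin_demo_t: ptr_spin((void *)(s)))
+
+	int main(void)
+	{
+		seqcount_spin_demo_t sc = { { 5 }, 0 };
+		printf("%u\n", demo_ptr(&sc)->sequence);	/* prints 5 */
+		return 0;
+	}
+)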
+
/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
@@ -122,7 +264,10 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned __read_seqcount_begin(const seqcount_t *s)
+#define __read_seqcount_begin(s) \
+ __read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
{
unsigned ret;
@@ -138,32 +283,38 @@ repeat:
/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
+#define raw_read_seqcount_begin(s) \
+ raw_read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
{
- unsigned ret = __read_seqcount_begin(s);
+ unsigned ret = __read_seqcount_t_begin(s);
smp_rmb();
return ret;
}
/**
* read_seqcount_begin() - begin a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned read_seqcount_begin(const seqcount_t *s)
+#define read_seqcount_begin(s) \
+ read_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
{
seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_begin(s);
+ return raw_read_seqcount_t_begin(s);
}
/**
* raw_read_seqcount() - read the raw seqcount_t counter value
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* raw_read_seqcount opens a read critical section of the given
* seqcount_t, without any lockdep checking, and without checking or
@@ -172,7 +323,10 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_read_seqcount(const seqcount_t *s)
+#define raw_read_seqcount(s) \
+ raw_read_seqcount_t(__seqcount_ptr(s))
+
+static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
{
unsigned ret = READ_ONCE(s->sequence);
smp_rmb();
@@ -183,7 +337,7 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
* lockdep and w/o counter stabilization
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* raw_seqcount_begin opens a read critical section of the given
* seqcount_t. Unlike read_seqcount_begin(), this function will not wait
@@ -197,18 +351,21 @@ static inline unsigned raw_read_seqcount(const seqcount_t *s)
*
* Return: count to be passed to read_seqcount_retry()
*/
-static inline unsigned raw_seqcount_begin(const seqcount_t *s)
+#define raw_seqcount_begin(s) \
+ raw_seqcount_t_begin(__seqcount_ptr(s))
+
+static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
{
/*
* If the counter is odd, let read_seqcount_retry() fail
* by decrementing the counter.
*/
- return raw_read_seqcount(s) & ~1;
+ return raw_read_seqcount_t(s) & ~1;
}
/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
* @start: count, from read_seqcount_begin()
*
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
@@ -221,7 +378,10 @@ static inline unsigned raw_seqcount_begin(const seqcount_t *s)
*
* Return: true if a read section retry is required, else false
*/
-static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define __read_seqcount_retry(s, start) \
+ __read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int __read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
kcsan_atomic_next(0);
return unlikely(READ_ONCE(s->sequence) != start);
@@ -229,7 +389,7 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
/**
* read_seqcount_retry() - end a seqcount_t read critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
* @start: count, from read_seqcount_begin()
*
* read_seqcount_retry closes the read critical section of given
@@ -238,17 +398,28 @@ static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
*
* Return: true if a read section retry is required, else false
*/
-static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
+#define read_seqcount_retry(s, start) \
+ read_seqcount_t_retry(__seqcount_ptr(s), start)
+
+static inline int read_seqcount_t_retry(const seqcount_t *s, unsigned start)
{
smp_rmb();
- return __read_seqcount_retry(s, start);
+ return __read_seqcount_t_retry(s, start);
}
/**
* raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*/
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+#define raw_write_seqcount_begin(s) \
+do { \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_disable(); \
+ \
+ raw_write_seqcount_t_begin(__seqcount_ptr(s)); \
+} while (0)
+
+static inline void raw_write_seqcount_t_begin(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
@@ -257,49 +428,50 @@ static inline void raw_write_seqcount_begin(seqcount_t *s)
/**
* raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*/
-static inline void raw_write_seqcount_end(seqcount_t *s)
+#define raw_write_seqcount_end(s) \
+do { \
+ raw_write_seqcount_t_end(__seqcount_ptr(s)); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void raw_write_seqcount_t_end(seqcount_t *s)
{
smp_wmb();
s->sequence++;
kcsan_nestable_atomic_end();
}
-static inline void __write_seqcount_begin_nested(seqcount_t *s, int subclass)
-{
- raw_write_seqcount_begin(s);
- seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
-}
-
/**
* write_seqcount_begin_nested() - start a seqcount_t write section with
* custom lockdep nesting level
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
* @subclass: lockdep nesting level
*
* See Documentation/locking/lockdep-design.rst
*/
-static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
+#define write_seqcount_begin_nested(s, subclass) \
+do { \
+ __seqcount_assert_lock_held(s); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_disable(); \
+ \
+ write_seqcount_t_begin_nested(__seqcount_ptr(s), subclass); \
+} while (0)
+
+static inline void write_seqcount_t_begin_nested(seqcount_t *s, int subclass)
{
- lockdep_assert_preemption_disabled();
- __write_seqcount_begin_nested(s, subclass);
-}
-
-/*
- * A write_seqcount_begin() variant w/o lockdep non-preemptibility checks.
- *
- * Use for internal seqlock.h code where it's known that preemption is
- * already disabled. For example, seqlock_t write side functions.
- */
-static inline void __write_seqcount_begin(seqcount_t *s)
-{
- __write_seqcount_begin_nested(s, 0);
+ raw_write_seqcount_t_begin(s);
+ seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}
/**
* write_seqcount_begin() - start a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* write_seqcount_begin opens a write side critical section of the given
* seqcount_t.
@@ -308,26 +480,44 @@ static inline void __write_seqcount_begin(seqcount_t *s)
* non-preemptible. If readers can be invoked from hardirq or softirq
* context, interrupts or bottom halves must be respectively disabled.
*/
-static inline void write_seqcount_begin(seqcount_t *s)
+#define write_seqcount_begin(s) \
+do { \
+ __seqcount_assert_lock_held(s); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_disable(); \
+ \
+ write_seqcount_t_begin(__seqcount_ptr(s)); \
+} while (0)
+
+static inline void write_seqcount_t_begin(seqcount_t *s)
{
- write_seqcount_begin_nested(s, 0);
+ write_seqcount_t_begin_nested(s, 0);
}
/**
* write_seqcount_end() - end a seqcount_t write side critical section
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* The write section must've been opened with write_seqcount_begin().
*/
-static inline void write_seqcount_end(seqcount_t *s)
+#define write_seqcount_end(s) \
+do { \
+ write_seqcount_t_end(__seqcount_ptr(s)); \
+ \
+ if (__seqcount_lock_preemptible(s)) \
+ preempt_enable(); \
+} while (0)
+
+static inline void write_seqcount_t_end(seqcount_t *s)
{
seqcount_release(&s->dep_map, _RET_IP_);
- raw_write_seqcount_end(s);
+ raw_write_seqcount_t_end(s);
}
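+
+(Editorial note: for a preemptible associated lock such as a mutex,
+__seqcount_lock_preemptible() is true, so the write_seqcount_begin()/end()
+macros above additionally disable and re-enable preemption around the
+section. A hedged usage sketch with hypothetical names:
+
+	static DEFINE_MUTEX(cfg_mutex);
+	static seqcount_mutex_t cfg_seq = SEQCNT_MUTEX_ZERO(cfg_seq, &cfg_mutex);
+	static unsigned long cfg_value;
+
+	static void cfg_update(unsigned long v)
+	{
+		mutex_lock(&cfg_mutex);
+		/* Asserts cfg_mutex is held, then disables preemption. */
+		write_seqcount_begin(&cfg_seq);
+		cfg_value = v;
+		write_seqcount_end(&cfg_seq);	/* re-enables preemption */
+		mutex_unlock(&cfg_mutex);
+	}
+)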
/**
* raw_write_seqcount_barrier() - do a seqcount_t write barrier
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* This can be used to provide an ordering guarantee instead of the usual
* consistency guarantee. It is one wmb cheaper, because it can collapse
@@ -366,7 +556,10 @@ static inline void write_seqcount_end(seqcount_t *s)
* WRITE_ONCE(X, false);
* }
*/
-static inline void raw_write_seqcount_barrier(seqcount_t *s)
+#define raw_write_seqcount_barrier(s) \
+ raw_write_seqcount_t_barrier(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_barrier(seqcount_t *s)
{
kcsan_nestable_atomic_begin();
s->sequence++;
@@ -378,12 +571,15 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s)
/**
* write_seqcount_invalidate() - invalidate in-progress seqcount_t read
* side operations
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* After write_seqcount_invalidate, no seqcount_t read side operations
* will complete successfully and see data older than this.
*/
-static inline void write_seqcount_invalidate(seqcount_t *s)
+#define write_seqcount_invalidate(s) \
+ write_seqcount_t_invalidate(__seqcount_ptr(s))
+
+static inline void write_seqcount_t_invalidate(seqcount_t *s)
{
smp_wmb();
kcsan_nestable_atomic_begin();
@@ -393,7 +589,7 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
/**
* raw_read_seqcount_latch() - pick even/odd seqcount_t latch data copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* Use seqcount_t latching to switch between two storage places protected
* by a sequence counter. Doing so allows having interruptible, preemptible,
@@ -406,7 +602,10 @@ static inline void write_seqcount_invalidate(seqcount_t *s)
* picking which data copy to read. The full counter value must then be
* checked with read_seqcount_retry().
*/
-static inline int raw_read_seqcount_latch(seqcount_t *s)
+#define raw_read_seqcount_latch(s) \
+ raw_read_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline int raw_read_seqcount_t_latch(seqcount_t *s)
{
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
int seq = READ_ONCE(s->sequence); /* ^^^ */
@@ -415,7 +614,7 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
/**
* raw_write_seqcount_latch() - redirect readers to even/odd copy
- * @s: Pointer to seqcount_t
+ * @s: Pointer to seqcount_t or any of the seqcount_locktype_t variants
*
* The latch technique is a multiversion concurrency control method that allows
* queries during non-atomic modifications. If you can guarantee queries never
@@ -494,7 +693,10 @@ static inline int raw_read_seqcount_latch(seqcount_t *s)
* When data is a dynamic data structure; one should use regular RCU
* patterns to manage the lifetimes of the objects within.
*/
-static inline void raw_write_seqcount_latch(seqcount_t *s)
+#define raw_write_seqcount_latch(s) \
+ raw_write_seqcount_t_latch(__seqcount_ptr(s))
+
+static inline void raw_write_seqcount_t_latch(seqcount_t *s)
{
smp_wmb(); /* prior stores before incrementing "sequence" */
s->sequence++;
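+
+(Editorial note: the canonical latch pattern, condensed from the kernel-doc
+above raw_write_seqcount_latch() into a hedged sketch - hypothetical names;
+writers must still be serialized externally:
+
+	struct entry {
+		unsigned long key, val;
+	};
+
+	static struct entry data[2];	/* two copies of the data */
+	static seqcount_t latch_seq = SEQCNT_ZERO(latch_seq);
+
+	static void latch_modify(const struct entry *e)
+	{
+		raw_write_seqcount_latch(&latch_seq);	/* readers -> data[1] */
+		data[0] = *e;
+		raw_write_seqcount_latch(&latch_seq);	/* readers -> data[0] */
+		data[1] = *e;
+	}
+
+	static struct entry latch_query(void)
+	{
+		struct entry ret;
+		unsigned int seq, idx;
+
+		do {
+			seq = raw_read_seqcount_latch(&latch_seq);
+			idx = seq & 0x1;	/* LSB selects the stable copy */
+			ret = data[idx];
+		} while (read_seqcount_retry(&latch_seq, seq));
+
+		return ret;
+	}
+)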
@@ -516,20 +718,20 @@ typedef struct {
spinlock_t lock;
} seqlock_t;
-#define __SEQLOCK_UNLOCKED(lockname) \
- { \
- .seqcount = SEQCNT_ZERO(lockname), \
- .lock = __SPIN_LOCK_UNLOCKED(lockname) \
+#define __SEQLOCK_UNLOCKED(lockname) \
+ { \
+ .seqcount = SEQCNT_ZERO(lockname), \
+ .lock = __SPIN_LOCK_UNLOCKED(lockname) \
}
/**
* seqlock_init() - dynamic initializer for seqlock_t
* @sl: Pointer to the seqlock_t instance
*/
-#define seqlock_init(sl) \
- do { \
- seqcount_init(&(sl)->seqcount); \
- spin_lock_init(&(sl)->lock); \
+#define seqlock_init(sl) \
+ do { \
+ seqcount_init(&(sl)->seqcount); \
+ spin_lock_init(&(sl)->lock); \
} while (0)
/**
@@ -592,7 +794,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
}
/**
@@ -604,7 +806,7 @@ static inline void write_seqlock(seqlock_t *sl)
*/
static inline void write_sequnlock(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock(&sl->lock);
}
@@ -618,7 +820,7 @@ static inline void write_sequnlock(seqlock_t *sl)
static inline void write_seqlock_bh(seqlock_t *sl)
{
spin_lock_bh(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
}
/**
@@ -631,7 +833,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock_bh(&sl->lock);
}
@@ -645,7 +847,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
static inline void write_seqlock_irq(seqlock_t *sl)
{
spin_lock_irq(&sl->lock);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
}
/**
@@ -657,7 +859,7 @@ static inline void write_seqlock_irq(seqlock_t *sl)
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock_irq(&sl->lock);
}
@@ -666,7 +868,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
- __write_seqcount_begin(&sl->seqcount);
+ write_seqcount_t_begin(&sl->seqcount);
return flags;
}
@@ -695,13 +897,13 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
- write_seqcount_end(&sl->seqcount);
+ write_seqcount_t_end(&sl->seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
}
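
(Editorial note: unlike the bare seqcount_t variants, seqlock_t bundles its
spinlock, so a writer needs only the single call pair shown above. A hedged
sketch of typical usage, with hypothetical names:

	static DEFINE_SEQLOCK(state_lock);
	static u64 state;

	static void state_set(u64 v)
	{
		write_seqlock(&state_lock);	/* lock + open write section */
		state = v;
		write_sequnlock(&state_lock);
	}

	static u64 state_get(void)
	{
		unsigned int seq;
		u64 v;

		do {
			seq = read_seqbegin(&state_lock);
			v = state;
		} while (read_seqretry(&state_lock, seq));

		return v;
	}
)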
/**
* read_seqlock_excl() - begin a seqlock_t locking reader section
- * @sl: Pointer to seqlock_t
+ * @sl: Pointer to seqlock_t
*
* read_seqlock_excl opens a seqlock_t locking reader critical section. A
* locking reader exclusively locks out *both* other writers *and* other
diff --git a/include/linux/time.h b/include/linux/time.h
index 4c325bf44ce0..b142cb5f5a53 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -3,7 +3,6 @@
#define _LINUX_TIME_H
# include <linux/cache.h>
-# include <linux/seqlock.h>
# include <linux/math64.h>
# include <linux/time64.h>
diff --git a/include/linux/videodev2.h b/include/linux/videodev2.h
index 16c0ed6c50a7..219037f4c08d 100644
--- a/include/linux/videodev2.h
+++ b/include/linux/videodev2.h
@@ -57,6 +57,7 @@
#define __LINUX_VIDEODEV2_H
#include <linux/time.h> /* need struct timeval */
+#include <linux/kernel.h>
#include <uapi/linux/videodev2.h>
#endif /* __LINUX_VIDEODEV2_H */
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index d7554252404c..850424e5d030 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -48,14 +48,6 @@ struct ww_acquire_ctx {
#endif
};
-struct ww_mutex {
- struct mutex base;
- struct ww_acquire_ctx *ctx;
-#ifdef CONFIG_DEBUG_MUTEXES
- struct ww_class *ww_class;
-#endif
-};
-
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
, .ww_class = class
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index c7bfddfc65b0..439379ca9ffa 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -298,7 +298,7 @@ int nf_conntrack_hash_resize(unsigned int hashsize);
extern struct hlist_nulls_head *nf_conntrack_hash;
extern unsigned int nf_conntrack_htable_size;
-extern seqcount_t nf_conntrack_generation;
+extern seqcount_spinlock_t nf_conntrack_generation;
extern unsigned int nf_conntrack_max;
/* must be called with rcu read lock held */