author     Michael S. Tsirkin <mst@redhat.com>     2018-01-26 00:36:38 +0100
committer  David S. Miller <davem@davemloft.net>   2018-01-29 18:02:54 +0100
commit     a07d29c6724a19eab120b7a74a9bfd107d20f69a (patch)
tree       bbfcf81f49ba2ddfc7e1ab33156dbb95757c1466 /include
parent     skb_array: use __ptr_ring_empty (diff)
ptr_ring: prevent queue load/store tearing
In theory the compiler could tear queue loads or stores in two. This does not
seem to happen in practice, but it is easier to convert the cases where it
would be a problem to READ_ONCE/WRITE_ONCE than to worry about it.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
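[Editor's illustration, not part of the patch: the sketch below shows the tearing hazard the commit message describes, using simplified userspace stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() (the real kernel macros are more elaborate; the volatile-cast versions here only approximate their effect for scalar-sized accesses). The names slot, publish and peek are hypothetical.]

    /* Simplified approximations of READ_ONCE/WRITE_ONCE: the volatile cast
     * forces the compiler to perform one full-width access, so it cannot
     * split ("tear") the load or store into narrower pieces.
     */
    #define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
    #define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

    void *slot;                     /* shared between producer and consumer */

    /* Producer side: with a plain assignment the compiler is in theory
     * allowed to emit, e.g., two 32-bit stores for one 64-bit pointer,
     * letting a concurrent reader observe a half-written value.
     */
    void publish(void *ptr)
    {
            WRITE_ONCE(slot, ptr);
    }

    /* Consumer side: READ_ONCE likewise guarantees a single untorn load. */
    void *peek(void)
    {
            return READ_ONCE(slot);
    }

The patch applies exactly this conversion to the two ptr_ring accesses where a torn value could be observed by the other side of the ring.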
Diffstat (limited to 'include')
-rw-r--r--   include/linux/ptr_ring.h  |  4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 3a19ebdcef14..1883d6137e9b 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -114,7 +114,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr)
 	/* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */
 	smp_wmb();
 
-	r->queue[r->producer++] = ptr;
+	WRITE_ONCE(r->queue[r->producer++], ptr);
 	if (unlikely(r->producer >= r->size))
 		r->producer = 0;
 
 	return 0;
@@ -173,7 +173,7 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
 static inline void *__ptr_ring_peek(struct ptr_ring *r)
 {
 	if (likely(r->size))
-		return r->queue[r->consumer_head];
+		return READ_ONCE(r->queue[r->consumer_head]);
 	return NULL;
 }
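[Editor's note: for context, a minimal sketch of how a caller might exercise the two converted paths, assuming the usual ptr_ring API (ptr_ring_init/produce/consume); the ring instance and function names are hypothetical.]

    #include <linux/ptr_ring.h>
    #include <linux/skbuff.h>

    static struct ptr_ring my_ring;        /* hypothetical ring instance */

    static int example_init(void)
    {
            /* 128 slots, allocated with GFP_KERNEL. */
            return ptr_ring_init(&my_ring, 128, GFP_KERNEL);
    }

    static void example_xmit(struct sk_buff *skb)
    {
            /* ptr_ring_produce() takes the producer lock and ends up in
             * __ptr_ring_produce(), i.e. the WRITE_ONCE() path above.
             */
            if (ptr_ring_produce(&my_ring, skb))
                    kfree_skb(skb);        /* ring full */
    }

    static void example_poll(void)
    {
            struct sk_buff *skb;

            /* ptr_ring_consume() peeks via __ptr_ring_peek(), i.e. the
             * READ_ONCE() path above, before advancing the consumer head.
             */
            while ((skb = ptr_ring_consume(&my_ring)) != NULL)
                    consume_skb(skb);
    }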