author	David Howells <dhowells@redhat.com>	2020-01-24 11:21:15 +0100
committer	David Howells <dhowells@redhat.com>	2022-12-01 14:36:42 +0100
commit	3dd9c8b5f09fd24652729a3da5c5efa3ec2c4590 (patch)
tree	7325670c28397dae62cf553efe6fba29ac39e921 /net/rxrpc/conn_service.c
parent	rxrpc: Make the I/O thread take over the call and local processor work (diff)
rxrpc: Remove the _bh annotation from all the spinlocks
None of the spinlocks in rxrpc need a _bh annotation now as the RCU callback routines no longer take spinlocks and the bulk of the packet wrangling code is now run in the I/O thread, not softirq context.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
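For context, the _bh suffix on a lock primitive disables softirq (bottom-half) processing on the local CPU while the lock is held; it is only needed when the same lock can also be taken from softirq context, where a softirq interrupting the lock holder on the same CPU could otherwise deadlock. With rxrpc's packet handling moved into the I/O thread (process context), the plain variants suffice. Below is a minimal, hypothetical sketch of that before/after pattern on a generic seqlock; example_lock, example_value and the function names are made-up illustrations, not rxrpc code.

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(example_lock);	/* hypothetical lock, not part of rxrpc */
static int example_value;		/* hypothetical data protected by the seqlock */

/* Before: a writer that could race with a softirq taking the same lock
 * had to use the _bh variant to keep local softirqs out while writing.
 */
static void example_update_bh(int v)
{
	write_seqlock_bh(&example_lock);
	example_value = v;
	write_sequnlock_bh(&example_lock);
}

/* After: every writer runs in process context (e.g. an I/O thread), so
 * the plain variant is sufficient and avoids the softirq-disable overhead.
 */
static void example_update(int v)
{
	write_seqlock(&example_lock);
	example_value = v;
	write_sequnlock(&example_lock);
}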
Diffstat (limited to 'net/rxrpc/conn_service.c')
-rw-r--r--	net/rxrpc/conn_service.c	|	10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
index b5ae7c753fc3..2a55a88b2a5b 100644
--- a/net/rxrpc/conn_service.c
+++ b/net/rxrpc/conn_service.c
@@ -73,7 +73,7 @@ static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
 	struct rxrpc_conn_proto k = conn->proto;
 	struct rb_node **pp, *parent;
 
-	write_seqlock_bh(&peer->service_conn_lock);
+	write_seqlock(&peer->service_conn_lock);
 
 	pp = &peer->service_conns.rb_node;
 	parent = NULL;
@@ -94,14 +94,14 @@ static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
 	rb_insert_color(&conn->service_node, &peer->service_conns);
 conn_published:
 	set_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags);
-	write_sequnlock_bh(&peer->service_conn_lock);
+	write_sequnlock(&peer->service_conn_lock);
 	_leave(" = %d [new]", conn->debug_id);
 	return;
 
 found_extant_conn:
 	if (refcount_read(&cursor->ref) == 0)
 		goto replace_old_connection;
-	write_sequnlock_bh(&peer->service_conn_lock);
+	write_sequnlock(&peer->service_conn_lock);
 	/* We should not be able to get here.  rxrpc_incoming_connection() is
 	 * called in a non-reentrant context, so there can't be a race to
 	 * insert a new connection.
@@ -195,8 +195,8 @@ void rxrpc_unpublish_service_conn(struct rxrpc_connection *conn)
 {
 	struct rxrpc_peer *peer = conn->peer;
 
-	write_seqlock_bh(&peer->service_conn_lock);
+	write_seqlock(&peer->service_conn_lock);
 	if (test_and_clear_bit(RXRPC_CONN_IN_SERVICE_CONNS, &conn->flags))
 		rb_erase(&conn->service_node, &peer->service_conns);
-	write_sequnlock_bh(&peer->service_conn_lock);
+	write_sequnlock(&peer->service_conn_lock);
 }
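For completeness, the read side of a seqlock is lockless and is unaffected by whether the writer uses the _bh variant: readers sample the sequence count and retry if a writer intervened. A rough sketch, continuing the hypothetical example_lock/example_value above (the real lockless lookup in this file, rxrpc_find_service_conn_rcu(), is more involved):

/* Retry-loop reader: no lock is taken, so softirq state is irrelevant here. */
static int example_read(void)
{
	unsigned int seq;
	int v;

	do {
		seq = read_seqbegin(&example_lock);
		v = example_value;
	} while (read_seqretry(&example_lock, seq));

	return v;
}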