author    | Chuck Lever <chuck.lever@oracle.com>       | 2015-05-26 17:53:13 +0200
committer | Anna Schumaker <Anna.Schumaker@Netapp.com> | 2015-06-12 19:10:37 +0200
commit    | 58d1dcf5a8ebb0ce8a521286a99efdd636012bf0 (patch)
tree      | 8fcc6ca4d9a5f1234f9f73acdd0fa0bd20a5e06c /net
parent    | xprtrdma: Remove rpcrdma_ia::ri_memreg_strategy (diff)
xprtrdma: Split rb_lock
/proc/lock_stat showed contention between rpcrdma_buffer_get/put
and the MR allocation functions during I/O intensive workloads.
Now that MRs are no longer allocated in rpcrdma_buffer_get(),
there's no reason the rb_mws list has to be managed using the
same lock as the send/receive buffers. Split that lock. The
new lock does not need to disable interrupts because buffer
get/put is never called in an interrupt context.
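A minimal sketch of that pattern, with made-up names (not taken from the patch): each independent piece of state gets its own plain spinlock, and no _irqsave variant is needed because the callers never run in interrupt context.

```c
/* Illustrative sketch only -- example_buffer and example_put_mw are not
 * part of the patch. Initialization (spin_lock_init, INIT_LIST_HEAD) is
 * elided for brevity.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

struct example_buffer {
	spinlock_t		mw_lock;	/* protects mw_free only */
	struct list_head	mw_free;

	spinlock_t		buf_lock;	/* protects the indexes only */
	int			send_index;
	int			recv_index;
};

static void example_put_mw(struct example_buffer *b, struct list_head *item)
{
	spin_lock(&b->mw_lock);		/* plain lock: never taken in IRQ context */
	list_add_tail(item, &b->mw_free);
	spin_unlock(&b->mw_lock);
}
```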
struct rpcrdma_buffer is re-arranged to ensure rb_mwlock and rb_mws
are always in a different cacheline than rb_lock and the buffer
pointers.
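The patch gets that separation from member ordering alone. A hypothetical way to force the same layout explicitly would be a cacheline-aligned attribute on the second group (illustrative struct, not from the patch):

```c
/* Hypothetical alternative, not what the patch does: push the second lock
 * domain onto its own cacheline with an explicit alignment attribute.
 */
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/cache.h>

struct example_buffer_aligned {
	spinlock_t		mw_lock;	/* MW free-list domain */
	struct list_head	mw_free;

	spinlock_t		buf_lock ____cacheline_aligned_in_smp;
	int			send_index;	/* buffer-index domain */
	int			recv_index;
};
```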
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Reviewed-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/xprtrdma/fmr_ops.c   |  1
-rw-r--r-- | net/sunrpc/xprtrdma/frwr_ops.c  |  1
-rw-r--r-- | net/sunrpc/xprtrdma/verbs.c     | 10
-rw-r--r-- | net/sunrpc/xprtrdma/xprt_rdma.h | 16
4 files changed, 15 insertions, 13 deletions
diff --git a/net/sunrpc/xprtrdma/fmr_ops.c b/net/sunrpc/xprtrdma/fmr_ops.c
index 5dd77dac094c..52f9ad5fe19b 100644
--- a/net/sunrpc/xprtrdma/fmr_ops.c
+++ b/net/sunrpc/xprtrdma/fmr_ops.c
@@ -65,6 +65,7 @@ fmr_op_init(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_mw *r;
 	int i, rc;
 
+	spin_lock_init(&buf->rb_mwlock);
 	INIT_LIST_HEAD(&buf->rb_mws);
 	INIT_LIST_HEAD(&buf->rb_all);
 
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 862279267fb8..18b7305d249f 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -266,6 +266,7 @@ frwr_op_init(struct rpcrdma_xprt *r_xprt)
 	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
 	int i;
 
+	spin_lock_init(&buf->rb_mwlock);
 	INIT_LIST_HEAD(&buf->rb_mws);
 	INIT_LIST_HEAD(&buf->rb_all);
 
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index cc1a52609974..234083560d0e 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1173,15 +1173,14 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_mw *mw = NULL;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buf->rb_lock, flags);
+	spin_lock(&buf->rb_mwlock);
 	if (!list_empty(&buf->rb_mws)) {
 		mw = list_first_entry(&buf->rb_mws,
 				      struct rpcrdma_mw, mw_list);
 		list_del_init(&mw->mw_list);
 	}
-	spin_unlock_irqrestore(&buf->rb_lock, flags);
+	spin_unlock(&buf->rb_mwlock);
 
 	if (!mw)
 		pr_err("RPC:       %s: no MWs available\n", __func__);
@@ -1192,11 +1191,10 @@ void
 rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
 {
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
-	unsigned long flags;
 
-	spin_lock_irqsave(&buf->rb_lock, flags);
+	spin_lock(&buf->rb_mwlock);
 	list_add_tail(&mw->mw_list, &buf->rb_mws);
-	spin_unlock_irqrestore(&buf->rb_lock, flags);
+	spin_unlock(&buf->rb_mwlock);
 }
 
 static void
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 3ecee38bf1a0..df92884400c4 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -282,15 +282,17 @@ rpcr_to_rdmar(struct rpc_rqst *rqst)
  * One of these is associated with a transport instance
  */
 struct rpcrdma_buffer {
-	spinlock_t	rb_lock;	/* protects indexes */
-	u32		rb_max_requests;/* client max requests */
-	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
-	struct list_head rb_all;
-	int		rb_send_index;
+	spinlock_t		rb_mwlock;	/* protect rb_mws list */
+	struct list_head	rb_mws;
+	struct list_head	rb_all;
+	char			*rb_pool;
+
+	spinlock_t		rb_lock;	/* protect buf arrays */
+	u32			rb_max_requests;
+	int			rb_send_index;
+	int			rb_recv_index;
 	struct rpcrdma_req	**rb_send_bufs;
-	int		rb_recv_index;
 	struct rpcrdma_rep	**rb_recv_bufs;
-	char		*rb_pool;
 };
 
 #define rdmab_to_ia(b) (&container_of((b), struct rpcrdma_xprt, rx_buf)->rx_ia)
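For context, a caller-side sketch of how the two helpers changed above are intended to be used. Only rpcrdma_get_mw() and rpcrdma_put_mw() come from the patch; the surrounding function is hypothetical.

```c
/* Hypothetical caller: borrow an MW from the rb_mws free list, use it,
 * and return it. Assumes the declarations in xprt_rdma.h; everything
 * except rpcrdma_get_mw()/rpcrdma_put_mw() is illustrative.
 */
#include <linux/errno.h>
#include "xprt_rdma.h"

static int example_use_mw(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_mw *mw;

	mw = rpcrdma_get_mw(r_xprt);	/* briefly takes rb_mwlock */
	if (!mw)
		return -ENOMEM;

	/* ... map and post a memory registration with mw ... */

	rpcrdma_put_mw(r_xprt, mw);	/* returns it to rb_mws under rb_mwlock */
	return 0;
}
```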