path: root/net/sunrpc
author Chuck Lever <chuck.lever@oracle.com> 2018-06-18 21:55:43 +0200
committer Trond Myklebust <trond.myklebust@hammerspace.com> 2018-06-19 14:53:48 +0200
commit 0dae72d581dfe795aedaf5523c1faeb18958b1a7 (patch)
tree 9804943e0565f51482de71ec7e18a766b8c21985 /net/sunrpc
parent pNFS: Don't send layoutreturn if the layout is already invalid (diff)
sunrpc: Prevent duplicate XID allocation
Krzysztof Kozlowski <krzk@kernel.org> reports that a heavy NFSv4 WRITE workload against a slow NFS server causes his Raspberry Pi clients to stall. Krzysztof bisected it to commit 37ac86c3a76c ("SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock").

I was able to reproduce similar behavior, and it appears that, rarely, the RPC client layer re-allocates an XID for an RPC that it has already partially sent. This results in the client ignoring the subsequent reply, which carries the original XID.

For various reasons, checking !req->rq_xmit_bytes_sent in xprt_prepare_transmit is not a 100% reliable mechanism for determining when a fresh XID is needed. Trond's preference is to allocate the XID at the time each rpc_rqst slot is initialized.

This patch should also address a gcc 4.1.2 complaint reported by Geert Uytterhoeven <geert@linux-m68k.org>.

Reported-by: Krzysztof Kozlowski <krzk@kernel.org>
Fixes: 37ac86c3a76c ("SUNRPC: Initialize rpc_rqst outside of xprt->reserve_lock")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Krzysztof Kozlowski <krzk@kernel.org>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
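To illustrate the idea behind the fix, here is a minimal userspace sketch (not kernel code: the toy_xprt/toy_rqst types and the toy_* helpers are hypothetical stand-ins). It models a lock-protected XID counter and a request slot whose XID is assigned exactly once at initialization, so a reply carrying that XID still matches the request even if transmission was interrupted partway and later retried.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the transport's XID counter, protected by a lock
 * (analogous to taking xprt->reserve_lock in the patch). */
struct toy_xprt {
	pthread_mutex_t reserve_lock;
	uint32_t xid;                 /* monotonically increasing counter */
};

/* Toy model of one request slot. */
struct toy_rqst {
	uint32_t rq_xid;              /* assigned once, at slot init */
	size_t bytes_sent;            /* partial-transmission progress */
};

/* Hypothetical helper: hand out the next XID under the lock. */
static uint32_t toy_alloc_xid(struct toy_xprt *xprt)
{
	uint32_t xid;

	pthread_mutex_lock(&xprt->reserve_lock);
	xid = xprt->xid++;
	pthread_mutex_unlock(&xprt->reserve_lock);
	return xid;
}

/* Hypothetical helper: initialize a request slot, fixing its XID once,
 * the way the patch allocates the XID in xprt_request_init(). */
static void toy_request_init(struct toy_xprt *xprt, struct toy_rqst *req)
{
	req->rq_xid = toy_alloc_xid(xprt);
	req->bytes_sent = 0;
}

/* Replies are matched purely by XID.  If the client re-allocated the
 * request's XID after partially sending it, this lookup would fail and
 * the real reply would be dropped -- the stall described above. */
static bool toy_match_reply(const struct toy_rqst *req, uint32_t reply_xid)
{
	return req->rq_xid == reply_xid;
}

int main(void)
{
	struct toy_xprt xprt = { PTHREAD_MUTEX_INITIALIZER, 0x10000000u };
	struct toy_rqst req;

	toy_request_init(&xprt, &req);
	req.bytes_sent = 512;         /* pretend the send was interrupted */

	/* The XID is not re-allocated on retransmit, so the server's
	 * reply (which echoes the original XID) still matches. */
	printf("reply matches: %s\n",
	       toy_match_reply(&req, req.rq_xid) ? "yes" : "no");
	return 0;
}

Taking the lock inside the allocator mirrors the patch, which wraps the xprt->xid increment in xprt->reserve_lock, presumably because (per commit 37ac86c3a76c) request-slot initialization no longer runs under that lock and the counter needs its own serialization.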
Diffstat (limited to 'net/sunrpc')
-rw-r--r-- net/sunrpc/xprt.c | 10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3c85af058227..3fabf9f6a0f9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -987,8 +987,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
 		task->tk_status = -EAGAIN;
 		goto out_unlock;
 	}
-	if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
-		req->rq_xid = xprt_alloc_xid(xprt);
 	ret = true;
 out_unlock:
 	spin_unlock_bh(&xprt->transport_lock);
@@ -1298,7 +1296,12 @@ void xprt_retry_reserve(struct rpc_task *task)
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
 {
-	return (__force __be32)xprt->xid++;
+	__be32 xid;
+
+	spin_lock(&xprt->reserve_lock);
+	xid = (__force __be32)xprt->xid++;
+	spin_unlock(&xprt->reserve_lock);
+	return xid;
 }
 
 static inline void xprt_init_xid(struct rpc_xprt *xprt)
@@ -1316,6 +1319,7 @@ void xprt_request_init(struct rpc_task *task)
 	req->rq_task = task;
 	req->rq_xprt = xprt;
 	req->rq_buffer = NULL;
+	req->rq_xid = xprt_alloc_xid(xprt);
 	req->rq_connect_cookie = xprt->connect_cookie - 1;
 	req->rq_bytes_sent = 0;
 	req->rq_snd_buf.len = 0;