author		NeilBrown <neilb@suse.de>	2022-03-07 00:41:44 +0100
committer	Trond Myklebust <trond.myklebust@hammerspace.com>	2022-03-13 17:59:35 +0100
commit		c487216bec83b0c5a8803e5c61433d33ad7b104d (patch)
tree		e7dba88508d6c3271c4d8e62673b33b6f7c06988 /net
parent		NFS: remove IS_SWAPFILE hack (diff)
SUNRPC/call_alloc: async tasks mustn't block waiting for memory
When memory is short, new worker threads cannot be created and we
depend on the minimum one rpciod thread to be able to handle
everything.  So it must not block waiting for memory.

mempools are particularly a problem as memory can only be released
back to the mempool by an async rpc task running.  If all available
workqueue threads are waiting on the mempool, no thread is available
to return anything.

rpc_malloc() can block, and this might cause deadlocks.  So check
RPC_IS_ASYNC(), rather than RPC_IS_SWAPPER(), to determine if
blocking is acceptable.

Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
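For readers who just want the net effect of the change, the GFP selection after this patch boils down to the sketch below. It is a standalone userspace illustration, not kernel code: the flag values, struct mock_task, and rpc_buf_gfp() are invented stand-ins for rpc_task, the RPC_IS_ASYNC()/RPC_IS_SWAPPER() tests, and the two allocation sites; only the decision flow mirrors the patched rpc_malloc() and xprt_rdma_allocate().

/*
 * Stand-alone illustration of the gfp selection introduced by this patch.
 * Flag values and types are userspace stand-ins, not kernel definitions;
 * only the decision flow mirrors rpc_malloc()/xprt_rdma_allocate().
 */
#include <stdio.h>

#define GFP_KERNEL     0x01u   /* base flags: allocation may sleep and reclaim */
#define GFP_NOWAIT     0x02u   /* allocation must not sleep */
#define __GFP_NOWARN   0x04u   /* no warning if the allocation fails */
#define __GFP_MEMALLOC 0x08u   /* may dip into memory reserves (swap-over-NFS) */

struct mock_task {
	int is_async;    /* stands in for RPC_IS_ASYNC(task) */
	int is_swapper;  /* stands in for RPC_IS_SWAPPER(task) */
};

static unsigned int rpc_buf_gfp(const struct mock_task *task)
{
	unsigned int gfp = GFP_KERNEL;

	/* Async tasks run on rpciod and must never block on allocation. */
	if (task->is_async)
		gfp = GFP_NOWAIT | __GFP_NOWARN;
	/* Swapper tasks additionally get access to the memory reserves. */
	if (task->is_swapper)
		gfp |= __GFP_MEMALLOC;
	return gfp;
}

int main(void)
{
	struct mock_task sync_task  = { 0, 0 };
	struct mock_task async_task = { 1, 0 };
	struct mock_task swap_task  = { 1, 1 };

	printf("sync:    %#x\n", rpc_buf_gfp(&sync_task));   /* GFP_KERNEL */
	printf("async:   %#x\n", rpc_buf_gfp(&async_task));  /* GFP_NOWAIT | __GFP_NOWARN */
	printf("swapper: %#x\n", rpc_buf_gfp(&swap_task));   /* above | __GFP_MEMALLOC */
	return 0;
}

The key point compared with the old code: __GFP_MEMALLOC is now ORed onto the base flags instead of replacing them, so only the RPC_IS_ASYNC() test decides whether the allocation may block.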
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/sched.c	4
-rw-r--r--	net/sunrpc/xprtrdma/transport.c	4
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 52769b883c0a..e5b07562ba45 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -1023,8 +1023,10 @@ int rpc_malloc(struct rpc_task *task)
 	struct rpc_buffer *buf;
 	gfp_t gfp = GFP_KERNEL;
 
+	if (RPC_IS_ASYNC(task))
+		gfp = GFP_NOWAIT | __GFP_NOWARN;
 	if (RPC_IS_SWAPPER(task))
-		gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
+		gfp |= __GFP_MEMALLOC;
 
 	size += sizeof(struct rpc_buffer);
 	if (size <= RPC_BUFFER_MAXSIZE)
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 42e375dbdadb..5714bf880e95 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -570,8 +570,10 @@ xprt_rdma_allocate(struct rpc_task *task)
 	gfp_t flags;
 
 	flags = RPCRDMA_DEF_GFP;
+	if (RPC_IS_ASYNC(task))
+		flags = GFP_NOWAIT | __GFP_NOWARN;
 	if (RPC_IS_SWAPPER(task))
-		flags = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
+		flags |= __GFP_MEMALLOC;
 
 	if (!rpcrdma_check_regbuf(r_xprt, req->rl_sendbuf, rqst->rq_callsize,
 				  flags))
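The flip side of never blocking is that the allocation can now fail, and an async task has to cope by being retried later instead of sleeping in the allocator; that is what keeps the lone rpciod thread free to run the tasks that will eventually return memory. The sketch below only illustrates that fail-fast/retry-later pattern; demo_task, demo_buf_alloc() and demo_allocate_step() are hypothetical names, not the SUNRPC call path.

/*
 * Hypothetical illustration of the fail-fast/retry-later pattern that a
 * non-blocking allocation implies.  None of these names exist in the
 * kernel; the point is only that returning -ENOMEM lets the worker
 * thread move on instead of sleeping while other work is pending.
 */
#include <errno.h>
#include <stdlib.h>

struct demo_task {
	void *buf;
	int   retries;
};

/* Stand-in for a GFP_NOWAIT-style allocation: it may fail, never sleeps. */
static int demo_buf_alloc(struct demo_task *t, size_t size)
{
	t->buf = malloc(size);
	return t->buf ? 0 : -ENOMEM;
}

/* Stand-in for the state-machine step that owns the buffer allocation. */
static int demo_allocate_step(struct demo_task *t)
{
	int err = demo_buf_alloc(t, 4096);

	if (err == -ENOMEM) {
		/* Don't sleep here: ask to be re-run after a short delay. */
		t->retries++;
		return 1;
	}
	return err;
}

int main(void)
{
	struct demo_task t = { NULL, 0 };

	/* In the real RPC engine the retry is driven by the task scheduler. */
	while (demo_allocate_step(&t) == 1)
		;
	free(t.buf);
	return 0;
}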