path: root/net
author     Linus Torvalds <torvalds@linux-foundation.org>  2022-10-13 18:58:42 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-10-13 18:58:42 +0200
commit     66b834558537c623135241e1ea9ddf11b31596e4 (patch)
tree       b8e6b078f552fce698709c41ad20c1d194057d6b /net
parent     Merge tag 'for-linus-6.1-ofs1' of git://git.kernel.org/pub/scm/linux/kernel/g... (diff)
parent     NFSv4/flexfiles: Cancel I/O if the layout is recalled or revoked (diff)
Merge tag 'nfs-for-6.1-1' of git://git.linux-nfs.org/projects/anna/linux-nfs
Pull NFS client updates from Anna Schumaker:

 "New Features:
   - Add NFSv4.2 xattr tracepoints
   - Replace xprtiod WQ in rpcrdma
   - Flexfiles cancels I/O on layout recall or revoke

  Bugfixes and Cleanups:
   - Directly use ida_alloc() / ida_free()
   - Don't open-code max_t()
   - Prefer using strscpy over strlcpy
   - Remove unused forward declarations
   - Always return layout states on flexfiles layout return
   - Have LISTXATTR treat NFS4ERR_NOXATTR as an empty reply instead of error
   - Allow more xprtrdma memory allocations to fail without triggering a reclaim
   - Various other xprtrdma clean ups
   - Fix rpc_killall_tasks() races"

* tag 'nfs-for-6.1-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (27 commits)
  NFSv4/flexfiles: Cancel I/O if the layout is recalled or revoked
  SUNRPC: Add API to force the client to disconnect
  SUNRPC: Add a helper to allow pNFS drivers to selectively cancel RPC calls
  SUNRPC: Fix races with rpc_killall_tasks()
  xprtrdma: Fix uninitialized variable
  xprtrdma: Prevent memory allocations from driving a reclaim
  xprtrdma: Memory allocation should be allowed to fail during connect
  xprtrdma: MR-related memory allocation should be allowed to fail
  xprtrdma: Clean up synopsis of rpcrdma_regbuf_alloc()
  xprtrdma: Clean up synopsis of rpcrdma_req_create()
  svcrdma: Clean up RPCRDMA_DEF_GFP
  SUNRPC: Replace the use of the xprtiod WQ in rpcrdma
  NFSv4.2: Add a tracepoint for listxattr
  NFSv4.2: Add tracepoints for getxattr, setxattr, and removexattr
  NFSv4.2: Move TRACE_DEFINE_ENUM(NFS4_CONTENT_*) under CONFIG_NFS_V4_2
  NFSv4.2: Add special handling for LISTXATTR receiving NFS4ERR_NOXATTR
  nfs: remove nfs_wait_atomic_killable() and nfs_write_prepare() declaration
  NFSv4: remove nfs4_renewd_prepare_shutdown() declaration
  fs/nfs/pnfs_nfs.c: fix spelling typo and syntax error in comment
  NFSv4/pNFS: Always return layout stats on layout return for flexfiles
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/clnt.c                           61
-rw-r--r--  net/sunrpc/sched.c                          51
-rw-r--r--  net/sunrpc/xprt.c                            9
-rw-r--r--  net/sunrpc/xprtmultipath.c                   4
-rw-r--r--  net/sunrpc/xprtrdma/backchannel.c            2
-rw-r--r--  net/sunrpc/xprtrdma/frwr_ops.c              20
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_backchannel.c   4
-rw-r--r--  net/sunrpc/xprtrdma/transport.c              3
-rw-r--r--  net/sunrpc/xprtrdma/verbs.c                 52
-rw-r--r--  net/sunrpc/xprtrdma/xprt_rdma.h             10
-rw-r--r--  net/sunrpc/xprtsock.c                        5
11 files changed, 141 insertions, 80 deletions
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c284efa3d1ef..993acf38af87 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -345,7 +345,7 @@ static int rpc_alloc_clid(struct rpc_clnt *clnt)
{
int clid;
- clid = ida_simple_get(&rpc_clids, 0, 0, GFP_KERNEL);
+ clid = ida_alloc(&rpc_clids, GFP_KERNEL);
if (clid < 0)
return clid;
clnt->cl_clid = clid;
@@ -354,7 +354,7 @@ static int rpc_alloc_clid(struct rpc_clnt *clnt)
static void rpc_free_clid(struct rpc_clnt *clnt)
{
- ida_simple_remove(&rpc_clids, clnt->cl_clid);
+ ida_free(&rpc_clids, clnt->cl_clid);
}
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
@@ -873,6 +873,57 @@ void rpc_killall_tasks(struct rpc_clnt *clnt)
}
EXPORT_SYMBOL_GPL(rpc_killall_tasks);
+/**
+ * rpc_cancel_tasks - try to cancel a set of RPC tasks
+ * @clnt: Pointer to RPC client
+ * @error: RPC task error value to set
+ * @fnmatch: Pointer to selector function
+ * @data: User data
+ *
+ * Uses @fnmatch to define a set of RPC tasks that are to be cancelled.
+ * The argument @error must be a negative error value.
+ */
+unsigned long rpc_cancel_tasks(struct rpc_clnt *clnt, int error,
+ bool (*fnmatch)(const struct rpc_task *,
+ const void *),
+ const void *data)
+{
+ struct rpc_task *task;
+ unsigned long count = 0;
+
+ if (list_empty(&clnt->cl_tasks))
+ return 0;
+ /*
+ * Spin lock all_tasks to prevent changes...
+ */
+ spin_lock(&clnt->cl_lock);
+ list_for_each_entry(task, &clnt->cl_tasks, tk_task) {
+ if (!RPC_IS_ACTIVATED(task))
+ continue;
+ if (!fnmatch(task, data))
+ continue;
+ rpc_task_try_cancel(task, error);
+ count++;
+ }
+ spin_unlock(&clnt->cl_lock);
+ return count;
+}
+EXPORT_SYMBOL_GPL(rpc_cancel_tasks);
+
+static int rpc_clnt_disconnect_xprt(struct rpc_clnt *clnt,
+ struct rpc_xprt *xprt, void *dummy)
+{
+ if (xprt_connected(xprt))
+ xprt_force_disconnect(xprt);
+ return 0;
+}
+
+void rpc_clnt_disconnect(struct rpc_clnt *clnt)
+{
+ rpc_clnt_iterate_for_each_xprt(clnt, rpc_clnt_disconnect_xprt, NULL);
+}
+EXPORT_SYMBOL_GPL(rpc_clnt_disconnect);
+
/*
* Properly shut down an RPC client, terminating all outstanding
* requests.
@@ -1642,7 +1693,7 @@ static void
__rpc_call_rpcerror(struct rpc_task *task, int tk_status, int rpc_status)
{
trace_rpc_call_rpcerror(task, tk_status, rpc_status);
- task->tk_rpc_status = rpc_status;
+ rpc_task_set_rpc_status(task, rpc_status);
rpc_exit(task, tk_status);
}
@@ -2435,10 +2486,8 @@ rpc_check_timeout(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
- if (RPC_SIGNALLED(task)) {
- rpc_call_rpcerror(task, -ERESTARTSYS);
+ if (RPC_SIGNALLED(task))
return;
- }
if (xprt_adjust_timeout(task->tk_rqstp) == 0)
return;
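[Note: the two exports added above form the new cancellation API named in the
merge summary. A minimal usage sketch follows, assuming a hypothetical
selector match_my_layout() and cookie; the error value is illustrative, and
the real caller in this series is the flexfiles layout driver.]

	/* Hypothetical pNFS-driver caller of the new API (sketch only). */
	static bool match_my_layout(const struct rpc_task *task, const void *data)
	{
		/* Driver-specific test against the cookie passed via @data;
		 * a real predicate would inspect the task's request state.
		 */
		return data != NULL;
	}

	static void my_cancel_layout_io(struct rpc_clnt *clnt, const void *layout)
	{
		/* @error must be negative; -EIO is only an example value. */
		if (rpc_cancel_tasks(clnt, -EIO, match_my_layout, layout))
			rpc_clnt_disconnect(clnt);
	}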
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 46cbf151a50b..be587a308e05 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -65,6 +65,13 @@ gfp_t rpc_task_gfp_mask(void)
}
EXPORT_SYMBOL_GPL(rpc_task_gfp_mask);
+bool rpc_task_set_rpc_status(struct rpc_task *task, int rpc_status)
+{
+ if (cmpxchg(&task->tk_rpc_status, 0, rpc_status) == 0)
+ return true;
+ return false;
+}
+
unsigned long
rpc_task_timeout(const struct rpc_task *task)
{
@@ -853,12 +860,25 @@ void rpc_signal_task(struct rpc_task *task)
if (!RPC_IS_ACTIVATED(task))
return;
+ if (!rpc_task_set_rpc_status(task, -ERESTARTSYS))
+ return;
trace_rpc_task_signalled(task, task->tk_action);
set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
smp_mb__after_atomic();
queue = READ_ONCE(task->tk_waitqueue);
if (queue)
- rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
+ rpc_wake_up_queued_task(queue, task);
+}
+
+void rpc_task_try_cancel(struct rpc_task *task, int error)
+{
+ struct rpc_wait_queue *queue;
+
+ if (!rpc_task_set_rpc_status(task, error))
+ return;
+ queue = READ_ONCE(task->tk_waitqueue);
+ if (queue)
+ rpc_wake_up_queued_task(queue, task);
}
void rpc_exit(struct rpc_task *task, int status)
@@ -905,10 +925,16 @@ static void __rpc_execute(struct rpc_task *task)
* Perform the next FSM step or a pending callback.
*
* tk_action may be NULL if the task has been killed.
- * In particular, note that rpc_killall_tasks may
- * do this at any time, so beware when dereferencing.
*/
do_action = task->tk_action;
+ /* Tasks with an RPC error status should exit */
+ if (do_action != rpc_exit_task &&
+ (status = READ_ONCE(task->tk_rpc_status)) != 0) {
+ task->tk_status = status;
+ if (do_action != NULL)
+ do_action = rpc_exit_task;
+ }
+ /* Callbacks override all actions */
if (task->tk_callback) {
do_action = task->tk_callback;
task->tk_callback = NULL;
@@ -931,14 +957,6 @@ static void __rpc_execute(struct rpc_task *task)
}
/*
- * Signalled tasks should exit rather than sleep.
- */
- if (RPC_SIGNALLED(task)) {
- task->tk_rpc_status = -ERESTARTSYS;
- rpc_exit(task, -ERESTARTSYS);
- }
-
- /*
* The queue->lock protects against races with
* rpc_make_runnable().
*
@@ -953,6 +971,12 @@ static void __rpc_execute(struct rpc_task *task)
spin_unlock(&queue->lock);
continue;
}
+ /* Wake up any task that has an exit status */
+ if (READ_ONCE(task->tk_rpc_status) != 0) {
+ rpc_wake_up_task_queue_locked(queue, task);
+ spin_unlock(&queue->lock);
+ continue;
+ }
rpc_clear_running(task);
spin_unlock(&queue->lock);
if (task_is_async)
@@ -970,10 +994,7 @@ static void __rpc_execute(struct rpc_task *task)
* clean up after sleeping on some queue, we don't
* break the loop here, but go around once more.
*/
- trace_rpc_task_signalled(task, task->tk_action);
- set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
- task->tk_rpc_status = -ERESTARTSYS;
- rpc_exit(task, -ERESTARTSYS);
+ rpc_signal_task(task);
}
trace_rpc_task_sync_wake(task, task->tk_action);
}
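[Note: rpc_task_set_rpc_status() above is the linchpin of the race fix:
cmpxchg() lets exactly one caller move tk_rpc_status away from zero, so a
concurrent rpc_signal_task() and rpc_task_try_cancel() cannot both claim the
task. The same first-writer-wins idiom in isolation, with illustrative names:]

	/* Sketch of the first-writer-wins pattern; not code from this patch. */
	struct one_shot_status {
		int status;		/* 0 == no error recorded yet */
	};

	static bool one_shot_set(struct one_shot_status *s, int error)
	{
		/* Succeeds only for the first caller; later errors are dropped. */
		return cmpxchg(&s->status, 0, error) == 0;
	}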
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index f8fae7815649..71dc26373444 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -1788,7 +1788,7 @@ static int xprt_alloc_id(struct rpc_xprt *xprt)
{
int id;
- id = ida_simple_get(&rpc_xprt_ids, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&rpc_xprt_ids, GFP_KERNEL);
if (id < 0)
return id;
@@ -1798,7 +1798,7 @@ static int xprt_alloc_id(struct rpc_xprt *xprt)
static void xprt_free_id(struct rpc_xprt *xprt)
{
- ida_simple_remove(&rpc_xprt_ids, xprt->id);
+ ida_free(&rpc_xprt_ids, xprt->id);
}
struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
@@ -1822,10 +1822,7 @@ struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
goto out_free;
list_add(&req->rq_list, &xprt->free);
}
- if (max_alloc > num_prealloc)
- xprt->max_reqs = max_alloc;
- else
- xprt->max_reqs = num_prealloc;
+ xprt->max_reqs = max_t(unsigned int, max_alloc, num_prealloc);
xprt->min_reqs = num_prealloc;
xprt->num_reqs = num_prealloc;
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 685db598acbe..701250b305db 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -103,7 +103,7 @@ static int xprt_switch_alloc_id(struct rpc_xprt_switch *xps, gfp_t gfp_flags)
{
int id;
- id = ida_simple_get(&rpc_xprtswitch_ids, 0, 0, gfp_flags);
+ id = ida_alloc(&rpc_xprtswitch_ids, gfp_flags);
if (id < 0)
return id;
@@ -113,7 +113,7 @@ static int xprt_switch_alloc_id(struct rpc_xprt_switch *xps, gfp_t gfp_flags)
static void xprt_switch_free_id(struct rpc_xprt_switch *xps)
{
- ida_simple_remove(&rpc_xprtswitch_ids, xps->xps_id);
+ ida_free(&rpc_xprtswitch_ids, xps->xps_id);
}
/**
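[Note: all three ida_simple_get()/ida_simple_remove() conversions in this pull
are mechanical. ida_simple_get(&ida, 0, 0, gfp) asks for the smallest free ID
with no upper bound, which is exactly what ida_alloc() expresses. Condensed
sketch with an illustrative IDA:]

	static DEFINE_IDA(example_ids);	/* illustrative, not from the patch */

	static int example_alloc_id(gfp_t gfp)
	{
		/* was: ida_simple_get(&example_ids, 0, 0, gfp) */
		return ida_alloc(&example_ids, gfp);
	}

	static void example_free_id(int id)
	{
		ida_free(&example_ids, id);	/* was: ida_simple_remove() */
	}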
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index faba7136dd9a..e4d84a13c566 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -189,7 +189,7 @@ create_req:
return NULL;
size = min_t(size_t, r_xprt->rx_ep->re_inline_recv, PAGE_SIZE);
- req = rpcrdma_req_create(r_xprt, size, GFP_KERNEL);
+ req = rpcrdma_req_create(r_xprt, size);
if (!req)
return NULL;
if (rpcrdma_req_setup(r_xprt, req)) {
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index de0bdb6b729f..ffbf99894970 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -124,16 +124,16 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
unsigned int depth = ep->re_max_fr_depth;
struct scatterlist *sg;
struct ib_mr *frmr;
- int rc;
+
+ sg = kcalloc_node(depth, sizeof(*sg), XPRTRDMA_GFP_FLAGS,
+ ibdev_to_node(ep->re_id->device));
+ if (!sg)
+ return -ENOMEM;
frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
if (IS_ERR(frmr))
goto out_mr_err;
- sg = kmalloc_array(depth, sizeof(*sg), GFP_KERNEL);
- if (!sg)
- goto out_list_err;
-
mr->mr_xprt = r_xprt;
mr->mr_ibmr = frmr;
mr->mr_device = NULL;
@@ -146,13 +146,9 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
return 0;
out_mr_err:
- rc = PTR_ERR(frmr);
- trace_xprtrdma_frwr_alloc(mr, rc);
- return rc;
-
-out_list_err:
- ib_dereg_mr(frmr);
- return -ENOMEM;
+ kfree(sg);
+ trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr));
+ return PTR_ERR(frmr);
}
/**
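[Note: two things changed in frwr_mr_init() above: the scatterlist is now
placed on the RDMA device's NUMA node, and it is allocated before the MR so
the error path only ever unwinds one resource. A generic sketch of that
unwind ordering, with hypothetical names:]

	/* Acquire the cheap-to-undo resource first; the failure path for the
	 * expensive one then needs a single kfree(). Sketch, not patch code.
	 */
	static int example_init(struct example *ex, int depth, int node)
	{
		u64 *table = kcalloc_node(depth, sizeof(*table),
					  XPRTRDMA_GFP_FLAGS, node);
		if (!table)
			return -ENOMEM;

		ex->res = example_acquire(depth);	/* hypothetical; may return ERR_PTR */
		if (IS_ERR(ex->res)) {
			kfree(table);
			return PTR_ERR(ex->res);
		}
		ex->table = table;
		return 0;
	}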
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 85c8cdda98b1..aa2227a7e552 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -119,12 +119,12 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
return -EINVAL;
}
- page = alloc_page(RPCRDMA_DEF_GFP);
+ page = alloc_page(GFP_NOIO | __GFP_NOWARN);
if (!page)
return -ENOMEM;
rqst->rq_buffer = page_address(page);
- rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
+ rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, GFP_NOIO | __GFP_NOWARN);
if (!rqst->rq_rbuffer) {
put_page(page);
return -ENOMEM;
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index bcb37b51adf6..10bb2b929c6d 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -494,8 +494,7 @@ xprt_rdma_connect(struct rpc_xprt *xprt, struct rpc_task *task)
xprt_reconnect_backoff(xprt, RPCRDMA_INIT_REEST_TO);
}
trace_xprtrdma_op_connect(r_xprt, delay);
- queue_delayed_work(xprtiod_workqueue, &r_xprt->rx_connect_worker,
- delay);
+ queue_delayed_work(system_long_wq, &r_xprt->rx_connect_worker, delay);
}
/**
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 2fbe9aaeec34..44b87e4274b4 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -76,8 +76,7 @@ static void rpcrdma_mrs_destroy(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_ep_get(struct rpcrdma_ep *ep);
static int rpcrdma_ep_put(struct rpcrdma_ep *ep);
static struct rpcrdma_regbuf *
-rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
- gfp_t flags);
+rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction);
static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
@@ -373,7 +372,7 @@ static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_ep *ep;
int rc;
- ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ ep = kzalloc(sizeof(*ep), XPRTRDMA_GFP_FLAGS);
if (!ep)
return -ENOTCONN;
ep->re_xprt = &r_xprt->rx_xprt;
@@ -606,7 +605,7 @@ static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
struct rpcrdma_sendctx *sc;
sc = kzalloc(struct_size(sc, sc_sges, ep->re_attr.cap.max_send_sge),
- GFP_KERNEL);
+ XPRTRDMA_GFP_FLAGS);
if (!sc)
return NULL;
@@ -629,7 +628,7 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
* Sends are posted.
*/
i = r_xprt->rx_ep->re_max_requests + RPCRDMA_MAX_BC_REQUESTS;
- buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
+ buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), XPRTRDMA_GFP_FLAGS);
if (!buf->rb_sc_ctxs)
return -ENOMEM;
@@ -740,13 +739,16 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_ep *ep = r_xprt->rx_ep;
+ struct ib_device *device = ep->re_id->device;
unsigned int count;
+ /* Try to allocate enough to perform one full-sized I/O */
for (count = 0; count < ep->re_max_rdma_segs; count++) {
struct rpcrdma_mr *mr;
int rc;
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ mr = kzalloc_node(sizeof(*mr), XPRTRDMA_GFP_FLAGS,
+ ibdev_to_node(device));
if (!mr)
break;
@@ -791,38 +793,33 @@ void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
/* If there is no underlying connection, it's no use
* to wake the refresh worker.
*/
- if (ep->re_connect_status == 1) {
- /* The work is scheduled on a WQ_MEM_RECLAIM
- * workqueue in order to prevent MR allocation
- * from recursing into NFS during direct reclaim.
- */
- queue_work(xprtiod_workqueue, &buf->rb_refresh_worker);
- }
+ if (ep->re_connect_status != 1)
+ return;
+ queue_work(system_highpri_wq, &buf->rb_refresh_worker);
}
/**
* rpcrdma_req_create - Allocate an rpcrdma_req object
* @r_xprt: controlling r_xprt
* @size: initial size, in bytes, of send and receive buffers
- * @flags: GFP flags passed to memory allocators
*
* Returns an allocated and fully initialized rpcrdma_req or NULL.
*/
-struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
- gfp_t flags)
+struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
+ size_t size)
{
struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
struct rpcrdma_req *req;
- req = kzalloc(sizeof(*req), flags);
+ req = kzalloc(sizeof(*req), XPRTRDMA_GFP_FLAGS);
if (req == NULL)
goto out1;
- req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE, flags);
+ req->rl_sendbuf = rpcrdma_regbuf_alloc(size, DMA_TO_DEVICE);
if (!req->rl_sendbuf)
goto out2;
- req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE, flags);
+ req->rl_recvbuf = rpcrdma_regbuf_alloc(size, DMA_NONE);
if (!req->rl_recvbuf)
goto out3;
@@ -858,7 +855,7 @@ int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
r_xprt->rx_ep->re_max_rdma_segs * rpcrdma_readchunk_maxsz;
maxhdrsize *= sizeof(__be32);
rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),
- DMA_TO_DEVICE, GFP_KERNEL);
+ DMA_TO_DEVICE);
if (!rb)
goto out;
@@ -929,12 +926,12 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
struct rpcrdma_rep *rep;
- rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+ rep = kzalloc(sizeof(*rep), XPRTRDMA_GFP_FLAGS);
if (rep == NULL)
goto out;
rep->rr_rdmabuf = rpcrdma_regbuf_alloc(r_xprt->rx_ep->re_inline_recv,
- DMA_FROM_DEVICE, GFP_KERNEL);
+ DMA_FROM_DEVICE);
if (!rep->rr_rdmabuf)
goto out_free;
@@ -1064,8 +1061,8 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
for (i = 0; i < r_xprt->rx_xprt.max_reqs; i++) {
struct rpcrdma_req *req;
- req = rpcrdma_req_create(r_xprt, RPCRDMA_V1_DEF_INLINE_SIZE * 2,
- GFP_KERNEL);
+ req = rpcrdma_req_create(r_xprt,
+ RPCRDMA_V1_DEF_INLINE_SIZE * 2);
if (!req)
goto out;
list_add(&req->rl_list, &buf->rb_send_bufs);
@@ -1235,15 +1232,14 @@ void rpcrdma_buffer_put(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
* or Replies they may be registered externally via frwr_map.
*/
static struct rpcrdma_regbuf *
-rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
- gfp_t flags)
+rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction)
{
struct rpcrdma_regbuf *rb;
- rb = kmalloc(sizeof(*rb), flags);
+ rb = kmalloc(sizeof(*rb), XPRTRDMA_GFP_FLAGS);
if (!rb)
return NULL;
- rb->rg_data = kmalloc(size, flags);
+ rb->rg_data = kmalloc(size, XPRTRDMA_GFP_FLAGS);
if (!rb->rg_data) {
kfree(rb);
return NULL;
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index c79f92eeda76..5e5ff6784ef5 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -149,7 +149,11 @@ static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)
return rb->rg_data;
}
-#define RPCRDMA_DEF_GFP (GFP_NOIO | __GFP_NOWARN)
+/* Do not use emergency memory reserves, and fail quickly if memory
+ * cannot be allocated easily. These flags may be used wherever there
+ * is robust logic to handle a failure to allocate.
+ */
+#define XPRTRDMA_GFP_FLAGS (__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN)
/* To ensure a transport can always make forward progress,
* the number of RDMA segments allowed in header chunk lists
@@ -467,8 +471,8 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp);
/*
* Buffer calls - xprtrdma/verbs.c
*/
-struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt, size_t size,
- gfp_t flags);
+struct rpcrdma_req *rpcrdma_req_create(struct rpcrdma_xprt *r_xprt,
+ size_t size);
int rpcrdma_req_setup(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req);
void rpcrdma_req_destroy(struct rpcrdma_req *req);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
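[Note: for reference, the semantics of each flag in the new XPRTRDMA_GFP_FLAGS
mask, summarized from the allocator documentation rather than this patch:]

	void *p = kmalloc(size, XPRTRDMA_GFP_FLAGS);
	/* __GFP_NOMEMALLOC - never dip into the emergency memory reserves
	 * __GFP_NORETRY    - fail quickly instead of retrying in the allocator
	 * __GFP_NOWARN     - failures are expected, so suppress the warning splat
	 */
	if (!p)
		return -ENOMEM;	/* every call site must tolerate failure */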
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e976007f4fd0..f34d5427b66c 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -261,7 +261,7 @@ static void xs_format_common_peer_addresses(struct rpc_xprt *xprt)
switch (sap->sa_family) {
case AF_LOCAL:
sun = xs_addr_un(xprt);
- strlcpy(buf, sun->sun_path, sizeof(buf));
+ strscpy(buf, sun->sun_path, sizeof(buf));
xprt->address_strings[RPC_DISPLAY_ADDR] =
kstrdup(buf, GFP_KERNEL);
break;
@@ -1978,8 +1978,7 @@ static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
* we'll need to figure out how to pass a namespace to
* connect.
*/
- task->tk_rpc_status = -ENOTCONN;
- rpc_exit(task, -ENOTCONN);
+ rpc_task_set_rpc_status(task, -ENOTCONN);
goto out_wake;
}
ret = xs_local_setup_socket(transport);
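[Note: the strscpy() conversion earlier in this file is part of the tree-wide
strlcpy() retirement noted in the merge summary. The practical difference, in
a small sketch with an illustrative source string and handling:]

	char buf[8];
	const char *src = "/var/run/example.socket";	/* illustrative */
	ssize_t n;

	/* strscpy() returns the number of bytes copied (excluding the NUL)
	 * or -E2BIG on truncation; strlcpy() returned strlen(src), so it
	 * read the whole source even for a bounded copy, and truncation had
	 * to be detected by comparing against the buffer size.
	 */
	n = strscpy(buf, src, sizeof(buf));
	if (n == -E2BIG)
		pr_debug("path truncated\n");	/* illustrative handling */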