author     J. Bruce Fields <bfields@redhat.com>  2013-04-26 17:37:29 +0200
committer  J. Bruce Fields <bfields@redhat.com>  2013-04-26 17:37:43 +0200
commit     c85b03ab200f6d2c2f80588d96d03c1f8fcaedc3 (patch)
tree       34f4f3200ef5db6ca93360879fffda050c04e1ca /net/sunrpc
parent     nfsd: Decode and send 64bit time values (diff)
parent     NFSv4: Ensure that we clear the NFS_OPEN_STATE flag when appropriate (diff)
Merge Trond's nfs-for-next
Merging Trond's nfs-for-next branch, mainly to get b7993cebb841b0da7a33e9d5ce301a9fd3209165 "SUNRPC: Allow rpc_create() to request that TCP slots be unlimited", which a small piece of the gss-proxy work depends on.
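
For illustration only (not part of this merge): a minimal sketch of how a caller such as the gss-proxy code might ask for unlimited TCP slots via rpc_create(). The example_* names, server name, program pointer and auth flavor are hypothetical; only RPC_CLNT_CREATE_INFINITE_SLOTS and the rpc_create_args fields come from the patches below.

/* Hedged sketch: request unlimited TCP slots when creating an RPC client. */
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprt.h>

static struct rpc_clnt *example_create_client(struct net *net,
					      struct sockaddr *addr,
					      size_t addrlen,
					      const struct rpc_program *prog)
{
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_TCP,
		.address	= addr,
		.addrsize	= addrlen,
		.servername	= "example-server",	/* hypothetical */
		.program	= prog,			/* hypothetical rpc_program */
		.version	= 1,
		.authflavor	= RPC_AUTH_NULL,
		/* new flag from this merge: do not cap the TCP slot table */
		.flags		= RPC_CLNT_CREATE_INFINITE_SLOTS,
	};

	return rpc_create(&args);
}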
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/Kconfig     1
-rw-r--r--  net/sunrpc/clnt.c     43
-rw-r--r--  net/sunrpc/sched.c     9
-rw-r--r--  net/sunrpc/xprt.c     61
-rw-r--r--  net/sunrpc/xprtsock.c 14
5 files changed, 106 insertions, 22 deletions
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index 516fe2caac2c..262caf03bd5f 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -24,7 +24,6 @@ config SUNRPC_XPRT_RDMA
config SUNRPC_SWAP
bool
depends on SUNRPC
- select NETVM
config RPCSEC_GSS_KRB5
tristate "Secure RPC: Kerberos V mechanism"
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index dcc446e7fbf6..651245aa829a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -414,6 +414,8 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
};
char servername[48];
+ if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
+ xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
/*
* If the caller chooses not to specify a hostname, whip
* up a string representation of the passed-in address.
@@ -1306,6 +1308,8 @@ call_reserve(struct rpc_task *task)
xprt_reserve(task);
}
+static void call_retry_reserve(struct rpc_task *task);
+
/*
* 1b. Grok the result of xprt_reserve()
*/
@@ -1347,7 +1351,7 @@ call_reserveresult(struct rpc_task *task)
case -ENOMEM:
rpc_delay(task, HZ >> 2);
case -EAGAIN: /* woken up; retry */
- task->tk_action = call_reserve;
+ task->tk_action = call_retry_reserve;
return;
case -EIO: /* probably a shutdown */
break;
@@ -1360,6 +1364,19 @@ call_reserveresult(struct rpc_task *task)
}
/*
+ * 1c. Retry reserving an RPC call slot
+ */
+static void
+call_retry_reserve(struct rpc_task *task)
+{
+ dprint_status(task);
+
+ task->tk_status = 0;
+ task->tk_action = call_reserveresult;
+ xprt_retry_reserve(task);
+}
+
+/*
* 2. Bind and/or refresh the credentials
*/
static void
@@ -1644,22 +1661,26 @@ call_connect_status(struct rpc_task *task)
dprint_status(task);
- task->tk_status = 0;
- if (status >= 0 || status == -EAGAIN) {
- clnt->cl_stats->netreconn++;
- task->tk_action = call_transmit;
- return;
- }
-
trace_rpc_connect_status(task, status);
switch (status) {
/* if soft mounted, test if we've timed out */
case -ETIMEDOUT:
task->tk_action = call_timeout;
- break;
- default:
- rpc_exit(task, -EIO);
+ return;
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+ case -ENETUNREACH:
+ if (RPC_IS_SOFTCONN(task))
+ break;
+ /* retry with existing socket, after a delay */
+ case 0:
+ case -EAGAIN:
+ task->tk_status = 0;
+ clnt->cl_stats->netreconn++;
+ task->tk_action = call_transmit;
+ return;
}
+ rpc_exit(task, status);
}
/*
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fb20f25ddec9..f8529fc8e542 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
task->tk_waitqueue = queue;
queue->qlen++;
+ /* barrier matches the read in rpc_wake_up_task_queue_locked() */
+ smp_wmb();
rpc_set_queued(task);
dprintk("RPC: %5u added to queue %p \"%s\"\n",
@@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task
*/
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
- if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
- __rpc_do_wake_up_task(queue, task);
+ if (RPC_IS_QUEUED(task)) {
+ smp_rmb();
+ if (task->tk_waitqueue == queue)
+ __rpc_do_wake_up_task(queue, task);
+ }
}
/*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index b7478d5e7ffd..745fca3cfd36 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -948,6 +948,34 @@ void xprt_transmit(struct rpc_task *task)
spin_unlock_bh(&xprt->transport_lock);
}
+static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ set_bit(XPRT_CONGESTED, &xprt->state);
+ rpc_sleep_on(&xprt->backlog, task, NULL);
+}
+
+static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
+{
+ if (rpc_wake_up_next(&xprt->backlog) == NULL)
+ clear_bit(XPRT_CONGESTED, &xprt->state);
+}
+
+static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+ bool ret = false;
+
+ if (!test_bit(XPRT_CONGESTED, &xprt->state))
+ goto out;
+ spin_lock(&xprt->reserve_lock);
+ if (test_bit(XPRT_CONGESTED, &xprt->state)) {
+ rpc_sleep_on(&xprt->backlog, task, NULL);
+ ret = true;
+ }
+ spin_unlock(&xprt->reserve_lock);
+out:
+ return ret;
+}
+
static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt, gfp_t gfp_flags)
{
struct rpc_rqst *req = ERR_PTR(-EAGAIN);
@@ -992,7 +1020,7 @@ void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
task->tk_status = -ENOMEM;
break;
case -EAGAIN:
- rpc_sleep_on(&xprt->backlog, task, NULL);
+ xprt_add_backlog(xprt, task);
dprintk("RPC: waiting for request slot\n");
default:
task->tk_status = -EAGAIN;
@@ -1028,7 +1056,7 @@ static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
memset(req, 0, sizeof(*req)); /* mark unused */
list_add(&req->rq_list, &xprt->free);
}
- rpc_wake_up_next(&xprt->backlog);
+ xprt_wake_up_backlog(xprt);
spin_unlock(&xprt->reserve_lock);
}
@@ -1092,7 +1120,8 @@ EXPORT_SYMBOL_GPL(xprt_free);
* xprt_reserve - allocate an RPC request slot
* @task: RPC task requesting a slot allocation
*
- * If no more slots are available, place the task on the transport's
+ * If the transport is marked as being congested, or if no more
+ * slots are available, place the task on the transport's
* backlog queue.
*/
void xprt_reserve(struct rpc_task *task)
@@ -1107,6 +1136,32 @@ void xprt_reserve(struct rpc_task *task)
task->tk_status = -EAGAIN;
rcu_read_lock();
xprt = rcu_dereference(task->tk_client->cl_xprt);
+ if (!xprt_throttle_congested(xprt, task))
+ xprt->ops->alloc_slot(xprt, task);
+ rcu_read_unlock();
+}
+
+/**
+ * xprt_retry_reserve - allocate an RPC request slot
+ * @task: RPC task requesting a slot allocation
+ *
+ * If no more slots are available, place the task on the transport's
+ * backlog queue.
+ * Note that the only difference with xprt_reserve is that we now
+ * ignore the value of the XPRT_CONGESTED flag.
+ */
+void xprt_retry_reserve(struct rpc_task *task)
+{
+ struct rpc_xprt *xprt;
+
+ task->tk_status = 0;
+ if (task->tk_rqstp != NULL)
+ return;
+
+ task->tk_timeout = 0;
+ task->tk_status = -EAGAIN;
+ rcu_read_lock();
+ xprt = rcu_dereference(task->tk_client->cl_xprt);
xprt->ops->alloc_slot(xprt, task);
rcu_read_unlock();
}
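
For reference, a hedged sketch (not from this patch) of the intended caller pattern, modeled on the call_reserve()/call_retry_reserve() pair added in net/sunrpc/clnt.c above: a first reservation attempt goes through xprt_reserve(), which may park the task on the backlog while XPRT_CONGESTED is set, whereas a retry after a backlog wake-up uses xprt_retry_reserve(), which skips that check so an already-throttled task is not throttled a second time. The example_* names are hypothetical.

/* Hedged sketch mirroring the clnt.c state-machine steps shown above. */
static void example_first_reserve(struct rpc_task *task)
{
	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);		/* may sleep on xprt->backlog if congested */
}

static void example_retry_reserve(struct rpc_task *task)
{
	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_retry_reserve(task);	/* ignores XPRT_CONGESTED on the retry */
}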
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 3d02130828da..9c2825827dec 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2207,10 +2207,6 @@ static void xs_tcp_setup_socket(struct work_struct *work)
*/
xs_tcp_force_close(xprt);
break;
- case -ECONNREFUSED:
- case -ECONNRESET:
- case -ENETUNREACH:
- /* retry with existing socket, after a delay */
case 0:
case -EINPROGRESS:
case -EALREADY:
@@ -2221,6 +2217,10 @@ static void xs_tcp_setup_socket(struct work_struct *work)
/* Happens, for instance, if the user specified a link
* local IPv6 address without a scope-id.
*/
+ case -ECONNREFUSED:
+ case -ECONNRESET:
+ case -ENETUNREACH:
+ /* retry with existing socket, after a delay */
goto out;
}
out_eagain:
@@ -2767,9 +2767,13 @@ static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
struct rpc_xprt *xprt;
struct sock_xprt *transport;
struct rpc_xprt *ret;
+ unsigned int max_slot_table_size = xprt_max_tcp_slot_table_entries;
+
+ if (args->flags & XPRT_CREATE_INFINITE_SLOTS)
+ max_slot_table_size = RPC_MAX_SLOT_TABLE_LIMIT;
xprt = xs_setup_xprt(args, xprt_tcp_slot_table_entries,
- xprt_max_tcp_slot_table_entries);
+ max_slot_table_size);
if (IS_ERR(xprt))
return xprt;
transport = container_of(xprt, struct sock_xprt, xprt);