author     Trond Myklebust <trond.myklebust@primarydata.com>   2014-08-03 19:03:11 +0200
committer  J. Bruce Fields <bfields@redhat.com>                2014-08-17 18:00:11 +0200
commit     a4aa8054a60c545f100826271ac9f04c34bf828d (patch)
tree       9c9c190c54eecb09aa03e4645528663bc02383b3 /net
parent     SUNRPC: get rid of the request wait queue (diff)
SUNRPC: Fix broken kthread_should_stop test in svc_get_next_xprt
We should definitely not be exiting svc_get_next_xprt() with the thread enqueued. Fix this by ensuring that we fall through to the dequeue. Also move the test itself outside the spin lock protected section.

Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
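The idiom the fix relies on is the standard one for kthreads that sleep: publish the task state, check the stop/signal condition, and only then call schedule_timeout(), so that a wakeup issued by kthread_stop() (or a pending signal) cannot be lost and the thread always continues to its cleanup/dequeue path instead of exiting while still enqueued. The sketch below shows that idiom in isolation; it is illustrative only, and the worker and its names (demo_thread_fn, demo_task) are hypothetical rather than part of the SUNRPC code.

#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>

static struct task_struct *demo_task;

static int demo_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* Publish the sleeping state first, then re-check the stop
		 * condition: if kthread_stop() has already been called we
		 * skip the sleep instead of missing its wakeup.
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		if (!kthread_should_stop())
			schedule_timeout(HZ);
		__set_current_state(TASK_RUNNING);

		/* ... do one unit of work, then loop and re-check ... */
	}
	return 0;
}

static int __init demo_init(void)
{
	demo_task = kthread_run(demo_thread_fn, NULL, "demo_thread");
	return PTR_ERR_OR_ZERO(demo_task);
}

static void __exit demo_exit(void)
{
	/* Wakes the thread and waits for demo_thread_fn() to return. */
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");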
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/svc_xprt.c | 31
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index faaf2b46273b..5eb6f32df3e5 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -632,7 +632,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 {
 	struct svc_xprt *xprt;
 	struct svc_pool *pool = rqstp->rq_pool;
-	long			time_left;
+	long			time_left = 0;
 
 	/* Normally we will wait up to 5 seconds for any required
 	 * cache information to be provided.
@@ -665,30 +665,19 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 
 		/* No data pending. Go to sleep */
 		svc_thread_enqueue(pool, rqstp);
-
-		/*
-		 * checking kthread_should_stop() here allows us to avoid
-		 * locking and signalling when stopping kthreads that call
-		 * svc_recv. If the thread has already been woken up, then
-		 * we can exit here without sleeping. If not, then it
-		 * it'll be woken up quickly during the schedule_timeout
-		 */
-		if (kthread_should_stop()) {
-			set_current_state(TASK_RUNNING);
-			xprt = ERR_PTR(-EINTR);
-			goto out;
-		}
-
 		spin_unlock_bh(&pool->sp_lock);
 
-		time_left = schedule_timeout(timeout);
-		__set_current_state(TASK_RUNNING);
+		if (!(signalled() || kthread_should_stop())) {
+			time_left = schedule_timeout(timeout);
+			__set_current_state(TASK_RUNNING);
 
-		try_to_freeze();
+			try_to_freeze();
 
-		xprt = rqstp->rq_xprt;
-		if (xprt != NULL)
-			return xprt;
+			xprt = rqstp->rq_xprt;
+			if (xprt != NULL)
+				return xprt;
+		} else
+			__set_current_state(TASK_RUNNING);
 
 		spin_lock_bh(&pool->sp_lock);
 		if (!time_left)
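For readability, here is the sleep path as it looks with the two hunks applied, pieced together from the diff above (indentation and blank lines are approximated). The signal/stop check now happens after the thread has been enqueued and sp_lock has been dropped, and both branches fall through to retake the lock, after which the function tests time_left under the lock as the last visible line of the diff shows.

		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);
		spin_unlock_bh(&pool->sp_lock);

		if (!(signalled() || kthread_should_stop())) {
			time_left = schedule_timeout(timeout);
			__set_current_state(TASK_RUNNING);

			try_to_freeze();

			xprt = rqstp->rq_xprt;
			if (xprt != NULL)
				return xprt;
		} else
			__set_current_state(TASK_RUNNING);

		spin_lock_bh(&pool->sp_lock);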