author     Trond Myklebust <trondmy@gmail.com>           2019-04-07 19:58:47 +0200
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>    2019-04-25 20:18:12 +0200
commit     87150aaed9e55d8b18a94aa2589aa4331429fce8 (patch)
tree       f2798dbd3e09d27a0f6a00578eb6b3533d11c829 /net/sunrpc
parent     SUNRPC: Refactor xprt_request_wait_receive() (diff)
SUNRPC: Refactor rpc_sleep_on()
rpc_sleep_on() does not need to set the task->tk_callback under the
queue lock, so move that out.
Also refactor the check for whether the task is active.
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc')
 -rw-r--r--  net/sunrpc/sched.c | 40
 1 file changed, 24 insertions, 16 deletions
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 3d6cb91ba598..8e96a841dd11 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -362,7 +362,6 @@ static void rpc_make_runnable(struct workqueue_struct *wq,
  */
 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
                 struct rpc_task *task,
-                rpc_action action,
                 unsigned char queue_priority)
 {
         dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
@@ -372,27 +371,39 @@ static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
 
         __rpc_add_wait_queue(q, task, queue_priority);
 
-        WARN_ON_ONCE(task->tk_callback != NULL);
-        task->tk_callback = action;
         __rpc_add_timer(q, task);
 }
 
-void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
-                rpc_action action)
+static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
+{
+        if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
+                task->tk_callback = action;
+}
+
+static bool rpc_sleep_check_activated(struct rpc_task *task)
 {
         /* We shouldn't ever put an inactive task to sleep */
-        WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
-        if (!RPC_IS_ACTIVATED(task)) {
+        if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
                 task->tk_status = -EIO;
                 rpc_put_task_async(task);
-                return;
+                return false;
         }
+        return true;
+}
+
+void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
+                rpc_action action)
+{
+        if (!rpc_sleep_check_activated(task))
+                return;
+
+        rpc_set_tk_callback(task, action);
 
         /*
          * Protect the queue operations.
          */
         spin_lock_bh(&q->lock);
-        __rpc_sleep_on_priority(q, task, action, task->tk_priority);
+        __rpc_sleep_on_priority(q, task, task->tk_priority);
         spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on);
@@ -400,19 +411,16 @@ EXPORT_SYMBOL_GPL(rpc_sleep_on);
 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
                 rpc_action action, int priority)
 {
-        /* We shouldn't ever put an inactive task to sleep */
-        WARN_ON_ONCE(!RPC_IS_ACTIVATED(task));
-        if (!RPC_IS_ACTIVATED(task)) {
-                task->tk_status = -EIO;
-                rpc_put_task_async(task);
+        if (!rpc_sleep_check_activated(task))
                 return;
-        }
+
+        rpc_set_tk_callback(task, action);
 
         /*
          * Protect the queue operations.
          */
         spin_lock_bh(&q->lock);
-        __rpc_sleep_on_priority(q, task, action, priority - RPC_PRIORITY_LOW);
+        __rpc_sleep_on_priority(q, task, priority - RPC_PRIORITY_LOW);
         spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
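
For reference, the rpc_sleep_on() call flow after this patch is summarized below. This is a sketch assembled from the hunks above, not a verbatim copy of the tree, and the inline comments are added here for explanation: the activation check and the tk_callback assignment now run before q->lock is taken, so only the wait-queue and timer manipulation remain inside the locked section.

void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
                rpc_action action)
{
        /* Inactive task: mark it -EIO and release it, no queueing. */
        if (!rpc_sleep_check_activated(task))
                return;

        /* Record the wake-up callback without holding the queue lock. */
        rpc_set_tk_callback(task, action);

        /* Only the queue insertion and timer arming need q->lock. */
        spin_lock_bh(&q->lock);
        __rpc_sleep_on_priority(q, task, task->tk_priority);
        spin_unlock_bh(&q->lock);
}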