path: root/fs/orangefs/devorangefs-req.c
author     Mike Marshall <hubcap@omnibond.com>  2015-12-14 20:54:46 +0100
committer  Mike Marshall <hubcap@omnibond.com>  2015-12-14 20:54:46 +0100
commit  ce6c414e17be602a84b1b34915468f8301ed14a0 (patch)
tree    f2373d995a4e4ad8f88a8a719df5fb6fb421cb83 /fs/orangefs/devorangefs-req.c
parent  Orangefs: de-uglify orangefs_devreq_writev, and devorangefs-req.c in general (diff)
Orangefs: Don't wait the old-fashioned way.
Get rid of add_wait_queue, set_current_state, etc, and use the
wait_event() model.

Signed-off-by: Mike Marshall <hubcap@omnibond.com>
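For context, here is a minimal sketch (not taken from the patch) of the two
waiting styles the commit message contrasts; my_waitq and my_condition are
hypothetical stand-ins for the driver's op->io_completion_waitq and
op->io_completed:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
static int my_condition;

/* Old-fashioned: manual wait-queue bookkeeping. */
static void wait_old_fashioned(void)
{
	DECLARE_WAITQUEUE(wait_entry, current);

	add_wait_queue(&my_waitq, &wait_entry);
	while (!my_condition) {
		/* Mark ourselves sleeping before re-testing the condition,
		 * so a wake-up between the test and schedule() is not lost. */
		set_current_state(TASK_INTERRUPTIBLE);
		if (my_condition)
			break;
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_waitq, &wait_entry);
}

/* wait_event() model: the same logic folded into one helper. */
static int wait_new_style(void)
{
	return wait_event_interruptible(my_waitq, my_condition);
}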
Diffstat (limited to 'fs/orangefs/devorangefs-req.c')
-rw-r--r--  fs/orangefs/devorangefs-req.c | 17
1 file changed, 8 insertions(+), 9 deletions(-)
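The hunks below do not call wait_event() directly; they open-code its core
with prepare_to_wait_exclusive()/finish_wait() so the completion flag can be
tested under op->lock. A hedged, stand-alone sketch of that pattern (the
waitq/lock/done names are illustrative, not the driver's fields):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/* Sleep until *done becomes nonzero; *done is protected by 'lock'. */
static void wait_for_completion_flag(wait_queue_head_t *waitq,
				     spinlock_t *lock, int *done)
{
	DEFINE_WAIT(wait_entry);

	while (1) {
		spin_lock(lock);
		/* Queue ourselves and set TASK_INTERRUPTIBLE before
		 * testing the flag, so a concurrent wake-up is not missed. */
		prepare_to_wait_exclusive(waitq, &wait_entry,
					  TASK_INTERRUPTIBLE);
		if (*done) {
			spin_unlock(lock);
			break;
		}
		spin_unlock(lock);

		if (signal_pending(current))
			break;	/* a real caller would return -EINTR here */

		schedule();
	}

	finish_wait(waitq, &wait_entry);	/* dequeue, back to TASK_RUNNING */
}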
diff --git a/fs/orangefs/devorangefs-req.c b/fs/orangefs/devorangefs-req.c
index b182b025db86..dc2e2ce7e943 100644
--- a/fs/orangefs/devorangefs-req.c
+++ b/fs/orangefs/devorangefs-req.c
@@ -432,7 +432,6 @@ static ssize_t orangefs_devreq_writev(struct file *file,
return -EIO;
}
} else {
- /* Change downcall status */
gossip_err("writev: could not vmalloc for trailer!\n");
dev_req_release(buffer);
put_op(op);
@@ -453,7 +452,7 @@ no_trailer:
*/
if (op->upcall.type == ORANGEFS_VFS_OP_FILE_IO) {
int timed_out = 0;
- DECLARE_WAITQUEUE(wait_entry, current);
+ DEFINE_WAIT(wait_entry);
/*
* tell the vfs op waiting on a waitqueue
@@ -463,14 +462,14 @@ no_trailer:
set_op_state_serviced(op);
spin_unlock(&op->lock);
- add_wait_queue_exclusive(&op->io_completion_waitq,
- &wait_entry);
wake_up_interruptible(&op->waitq);
while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
-
spin_lock(&op->lock);
+ prepare_to_wait_exclusive(
+ &op->io_completion_waitq,
+ &wait_entry,
+ TASK_INTERRUPTIBLE);
if (op->io_completed) {
spin_unlock(&op->lock);
break;
@@ -497,9 +496,9 @@ no_trailer:
break;
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&op->io_completion_waitq,
- &wait_entry);
+ spin_lock(&op->lock);
+ finish_wait(&op->io_completion_waitq, &wait_entry);
+ spin_unlock(&op->lock);
/* NOTE: for I/O operations we handle releasing the op
* object except in the case of timeout. the reason we