-rw-r--r--   fs/dlm/lock.c    7
-rw-r--r--   fs/dlm/user.c   16
2 files changed, 20 insertions(+), 3 deletions(-)
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 82e1ac251843..227443218167 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -526,6 +526,7 @@ static int create_lkb(struct dlm_ls *ls, struct dlm_lkb **lkb_ret)
lkb->lkb_nodeid = -1;
lkb->lkb_grmode = DLM_LOCK_IV;
kref_init(&lkb->lkb_ref);
+ INIT_LIST_HEAD(&lkb->lkb_ownqueue);
get_random_bytes(&bucket, sizeof(bucket));
bucket &= (ls->ls_lkbtbl_size - 1);
@@ -3705,7 +3706,7 @@ int dlm_user_unlock(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
goto out_put;
spin_lock(&ua->proc->locks_spin);
- list_del(&lkb->lkb_ownqueue);
+ list_del_init(&lkb->lkb_ownqueue);
spin_unlock(&ua->proc->locks_spin);
/* this removes the reference for the proc->locks list added by
@@ -3749,7 +3750,7 @@ int dlm_user_cancel(struct dlm_ls *ls, struct dlm_user_args *ua_tmp,
/* this lkb was removed from the WAITING queue */
if (lkb->lkb_grmode == DLM_LOCK_IV) {
spin_lock(&ua->proc->locks_spin);
- list_del(&lkb->lkb_ownqueue);
+ list_del_init(&lkb->lkb_ownqueue);
spin_unlock(&ua->proc->locks_spin);
unhold_lkb(lkb);
}
@@ -3817,7 +3818,7 @@ void dlm_clear_proc_locks(struct dlm_ls *ls, struct dlm_user_proc *proc)
unhold_lkb(lkb);
}
- list_del(&lkb->lkb_ownqueue);
+ list_del_init(&lkb->lkb_ownqueue);
if (lkb->lkb_exflags & DLM_LKF_PERSISTENT) {
lkb->lkb_flags |= DLM_IFL_ORPHAN;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 1f05960a916f..fd19caf9af97 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -133,6 +133,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
struct dlm_ls *ls;
struct dlm_user_args *ua;
struct dlm_user_proc *proc;
+ int remove_ownqueue = 0;
/* dlm_clear_proc_locks() sets ORPHAN/DEAD flag on each
lkb before dealing with it. We need to check this
@@ -171,6 +172,14 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
wake_up_interruptible(&proc->wait);
}
+ /* noqueue requests that fail may need to be removed from the
+ proc's locks list, there should be a better way of detecting
+ this situation than checking all these things... */
+
+ if (type == AST_COMP && lkb->lkb_grmode == DLM_LOCK_IV &&
+ ua->lksb.sb_status == -EAGAIN && !list_empty(&lkb->lkb_ownqueue))
+ remove_ownqueue = 1;
+
/* We want to copy the lvb to userspace when the completion
ast is read if the status is 0, the lock has an lvb and
lvb_ops says we should. We could probably have set_lvb_lock()
@@ -185,6 +194,13 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, int type)
ua->update_user_lvb = 0;
spin_unlock(&proc->asts_spin);
+
+ if (remove_ownqueue) {
+ spin_lock(&ua->proc->locks_spin);
+ list_del_init(&lkb->lkb_ownqueue);
+ spin_unlock(&ua->proc->locks_spin);
+ dlm_put_lkb(lkb);
+ }
out:
mutex_unlock(&ls->ls_clear_proc_locks);
}
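
The sketch below is a minimal userspace illustration (not the kernel's <linux/list.h>) of the list property this patch relies on: create_lkb() now initializes lkb->lkb_ownqueue, the removal sites switch from list_del() to list_del_init(), and dlm_user_add_ast() can then use !list_empty(&lkb->lkb_ownqueue) to tell whether the lkb is still on the proc's locks list. Plain list_del() leaves the removed entry's own pointers in a poisoned state, so that test would give a stale answer. The helper names mirror the kernel API but are reimplemented here purely for illustration.

/*
 * Illustration only: a cut-down reimplementation of the kernel list
 * helpers, showing why list_del_init() is needed when list_empty() is
 * later used as an "is this entry still queued?" test.
 */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* list_del() unlinks the entry but leaves its own pointers unusable
 * (the real kernel poisons them; NULL stands in for the poison here). */
static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	entry->next = NULL;
	entry->prev = NULL;
}

/* list_del_init() unlinks and re-initializes, so list_empty(entry) is
 * a reliable "no longer on any list" indication afterwards. */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head locks, a, b;

	INIT_LIST_HEAD(&locks);
	INIT_LIST_HEAD(&a);	/* as create_lkb() now does for lkb_ownqueue */
	INIT_LIST_HEAD(&b);

	list_add(&a, &locks);
	list_add(&b, &locks);

	list_del(&a);		/* old behaviour of the removal sites */
	list_del_init(&b);	/* new behaviour in dlm_user_unlock() etc. */

	/* Mirrors the !list_empty(&lkb->lkb_ownqueue) check added to
	 * dlm_user_add_ast(): only the list_del_init() case correctly
	 * reports that the entry is no longer queued. */
	printf("after list_del:      still queued? %s\n",
	       list_empty(&a) ? "no" : "yes");	/* prints "yes" (stale) */
	printf("after list_del_init: still queued? %s\n",
	       list_empty(&b) ? "no" : "yes");	/* prints "no" */
	return 0;
}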