-rw-r--r--	fs/dlm/lockspace.c	3
-rw-r--r--	include/linux/dlm.h	17
-rw-r--r--	include/uapi/linux/dlm.h	2
3 files changed, 21 insertions(+), 1 deletion(-)
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 8a4351ee9a42..a7ac0fcb4ef3 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -629,6 +629,9 @@ int dlm_new_user_lockspace(const char *name, const char *cluster,
void *ops_arg, int *ops_result,
dlm_lockspace_t **lockspace)
{
+ if (flags & DLM_LSFL_SOFTIRQ)
+ return -EINVAL;
+
return __dlm_new_lockspace(name, cluster, flags, lvblen, ops,
ops_arg, ops_result, lockspace);
}
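
Only kernel callers of dlm_new_lockspace() may set the new flag; as the hunk above shows, the user-lockspace path rejects it with -EINVAL. Below is a minimal sketch of a hypothetical in-kernel caller; the module name, lockspace name, and error handling are illustrative and not part of this patch.

/* sketch.c: hypothetical in-kernel user of the new flag (illustrative) */
#include <linux/module.h>
#include <linux/dlm.h>

static dlm_lockspace_t *sketch_ls;

static int __init sketch_init(void)
{
        int error;

        /* kernel callers may opt in to softirq-safe callbacks; the
         * user-lockspace path above returns -EINVAL for the same bit */
        error = dlm_new_lockspace("sketch", NULL, DLM_LSFL_SOFTIRQ,
                                  32 /* lvblen, multiple of 8 */,
                                  NULL, NULL, NULL, &sketch_ls);
        if (error)
                pr_err("sketch: dlm_new_lockspace failed %d\n", error);
        return error;
}

static void __exit sketch_exit(void)
{
        dlm_release_lockspace(sketch_ls, 2);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");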
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index c58c4f790c04..bacda9898f2b 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -35,6 +35,9 @@ struct dlm_lockspace_ops {
int num_slots, int our_slot, uint32_t generation);
};
+/* only relevant for kernel lockspaces, will be removed in the future */
+#define DLM_LSFL_SOFTIRQ __DLM_LSFL_RESERVED0
+
/*
* dlm_new_lockspace
*
@@ -55,6 +58,11 @@ struct dlm_lockspace_ops {
* used to select the directory node. Must be the same on all nodes.
* DLM_LSFL_NEWEXCL
* dlm_new_lockspace() should return -EEXIST if the lockspace exists.
+ * DLM_LSFL_SOFTIRQ
+ * DLM request callbacks (ast, bast) are softirq safe. Users should
+ * prefer this flag; it will become the default in the future. If set,
+ * the strongest context for the ast and bast callbacks is softirq,
+ * which avoids an additional context switch.
*
* lvblen: length of lvb in bytes. Must be multiple of 8.
* dlm_new_lockspace() returns an error if this does not match
@@ -121,7 +129,14 @@ int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
* call.
*
* AST routines should not block (at least not for long), but may make
- * any locking calls they please.
+ * any locking calls they please. If kernel users of
+ * dlm_new_lockspace() pass DLM_LSFL_SOFTIRQ, the ast and bast
+ * callbacks may be processed in softirq context. Note that some
+ * callbacks may also run in the same context as the DLM lock request
+ * API, so users must not hold a lock while calling the lock request
+ * API and then try to acquire that same lock again in the callback,
+ * as this results in lock recursion. New implementations should use
+ * DLM_LSFL_SOFTIRQ.
*/
int dlm_lock(dlm_lockspace_t *lockspace,
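
The comment above boils down to one rule for kernel users: an ast or bast handler registered on a DLM_LSFL_SOFTIRQ lockspace may run in softirq context, or even synchronously from the dlm_lock() call itself, so it must not sleep and must not take a lock the requester might already hold. A minimal sketch of that pattern, using a completion to hand the result back to the requester; the sketch_* names and the helper itself are illustrative, not part of this patch.

#include <linux/completion.h>
#include <linux/string.h>
#include <linux/dlm.h>

struct sketch_lock {
        struct dlm_lksb lksb;
        struct completion done;
};

/* may run in softirq context (or in the context of the dlm_lock() call):
 * no sleeping, no acquiring locks the requester may hold; just signal */
static void sketch_ast(void *arg)
{
        struct sketch_lock *lk = arg;

        complete(&lk->done);
}

/* another node wants the resource; must be softirq safe as well */
static void sketch_bast(void *arg, int mode)
{
}

static int sketch_lock_ex(dlm_lockspace_t *ls, struct sketch_lock *lk,
                          const char *name)
{
        int error;

        init_completion(&lk->done);

        error = dlm_lock(ls, DLM_LOCK_EX, &lk->lksb, 0,
                         name, strlen(name), 0,
                         sketch_ast, lk, sketch_bast);
        if (error)
                return error;

        /* the requester, not the callback, does the sleeping */
        wait_for_completion(&lk->done);
        return lk->lksb.sb_status;
}

The same shape works without DLM_LSFL_SOFTIRQ; the flag only changes the strongest context in which the callbacks may be invoked.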
diff --git a/include/uapi/linux/dlm.h b/include/uapi/linux/dlm.h
index e7e905fb0bb2..4eaf835780b0 100644
--- a/include/uapi/linux/dlm.h
+++ b/include/uapi/linux/dlm.h
@@ -71,6 +71,8 @@ struct dlm_lksb {
/* DLM_LSFL_TIMEWARN is deprecated and reserved. DO NOT USE! */
#define DLM_LSFL_TIMEWARN 0x00000002
#define DLM_LSFL_NEWEXCL 0x00000008
+/* currently reserved due to in-kernel use */
+#define __DLM_LSFL_RESERVED0 0x00000010
#endif /* _UAPI__DLM_DOT_H__ */