path: root/fs/gfs2/glops.c
author	Andreas Gruenbacher <agruenba@redhat.com>	2017-06-30 14:47:15 +0200
committer	Bob Peterson <rpeterso@redhat.com>	2017-07-05 14:20:24 +0200
commit	4fd1a5795214bc6405f14691c1344ae8c3f17215 (patch)
tree	ac0d1a48b243c6e27b40460503bc99290fa3c564 /fs/gfs2/glops.c
parent	GFS2: Eliminate vestigial sd_log_flush_wrapped (diff)
download	linux-4fd1a5795214bc6405f14691c1344ae8c3f17215.tar.xz
	linux-4fd1a5795214bc6405f14691c1344ae8c3f17215.zip
gfs2: Get rid of flush_delayed_work in gfs2_evict_inode
So far, gfs2_evict_inode clears gl->gl_object and then flushes the glock work queue to make sure that inode glops which dereference gl->gl_object have finished running before the inode is destroyed. However, flushing the work queue may do more work than needed, and in particular, it may call into DLM, which we want to avoid here. Use a bit lock (GIF_GLOP_PENDING) to synchronize between the inode glops and gfs2_evict_inode instead to get rid of the flushing.

In addition, flush the work queues of existing glocks before reusing them for new inodes to get those glocks into a known state: the glock state engine currently doesn't handle glock re-appropriation correctly. (We may be able to fix the glock state engine instead later.)

Based on a patch by Steven Whitehouse <swhiteho@redhat.com>.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
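The waiting side of this bit lock lives in gfs2_evict_inode (fs/gfs2/super.c) and is not part of the glops.c diff below, which only adds the set/clear helpers. As a rough sketch of the pattern, assuming the evict path simply sleeps on the same flag word (the helper name here is hypothetical, not taken from the commit):

	/*
	 * Illustrative only -- not from this commit.  Needs
	 * <linux/wait_bit.h> and <linux/sched.h> in a kernel build.
	 *
	 * Glops publish themselves by setting GIF_GLOP_PENDING in
	 * gfs2_glock2inode() while gl->gl_object is still valid, and
	 * gfs2_clear_glop_pending() clears the bit with
	 * clear_bit_unlock() followed by wake_up_bit().  An evicting
	 * task can therefore wait for the bit to clear instead of
	 * flushing the whole glock work queue:
	 */
	static void gfs2_glop_wait(struct gfs2_inode *ip)	/* hypothetical helper */
	{
		wait_on_bit(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
	}

Because the bit is released with clear_bit_unlock(), the waiter observes all stores the glop made to the inode before the bit was cleared, so no work-queue flush (and hence no call into DLM) is needed on eviction.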
Diffstat (limited to 'fs/gfs2/glops.c')
-rw-r--r--	fs/gfs2/glops.c	39
1 file changed, 32 insertions, 7 deletions
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 5db59d444838..7449b19135c3 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -197,6 +197,27 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}
+static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
+{
+ struct gfs2_inode *ip;
+
+ spin_lock(&gl->gl_lockref.lock);
+ ip = gl->gl_object;
+ if (ip)
+ set_bit(GIF_GLOP_PENDING, &ip->i_flags);
+ spin_unlock(&gl->gl_lockref.lock);
+ return ip;
+}
+
+static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
+{
+ if (!ip)
+ return;
+
+ clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
+ wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
+}
+
/**
* inode_go_sync - Sync the dirty data and/or metadata for an inode glock
* @gl: the glock protecting the inode
@@ -205,25 +226,24 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
static void inode_go_sync(struct gfs2_glock *gl)
{
- struct gfs2_inode *ip = gl->gl_object;
+ struct gfs2_inode *ip = gfs2_glock2inode(gl);
+ int isreg = ip && S_ISREG(ip->i_inode.i_mode);
struct address_space *metamapping = gfs2_glock2aspace(gl);
int error;
- if (ip && !S_ISREG(ip->i_inode.i_mode))
- ip = NULL;
- if (ip) {
+ if (isreg) {
if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
inode_dio_wait(&ip->i_inode);
}
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
- return;
+ goto out;
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
gfs2_log_flush(gl->gl_name.ln_sbd, gl, NORMAL_FLUSH);
filemap_fdatawrite(metamapping);
- if (ip) {
+ if (isreg) {
struct address_space *mapping = ip->i_inode.i_mapping;
filemap_fdatawrite(mapping);
error = filemap_fdatawait(mapping);
@@ -238,6 +258,9 @@ static void inode_go_sync(struct gfs2_glock *gl)
*/
smp_mb__before_atomic();
clear_bit(GLF_DIRTY, &gl->gl_flags);
+
+out:
+ gfs2_clear_glop_pending(ip);
}
/**
@@ -253,7 +276,7 @@ static void inode_go_sync(struct gfs2_glock *gl)
static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
- struct gfs2_inode *ip = gl->gl_object;
+ struct gfs2_inode *ip = gfs2_glock2inode(gl);
gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
@@ -274,6 +297,8 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
}
if (ip && S_ISREG(ip->i_inode.i_mode))
truncate_inode_pages(ip->i_inode.i_mapping, 0);
+
+ gfs2_clear_glop_pending(ip);
}
/**