author		J. Bruce Fields <bfields@citi.umich.edu>	2010-03-04 18:03:16 +0100
committer	J. Bruce Fields <bfields@citi.umich.edu>	2010-03-04 18:04:51 +0100
commit		4ea41e2de5bba756858bb40f964e3490b6d1a25c (patch)
tree		51a688be321dd42f80d8a0c966eb6a60408d50b3 /fs
parent		nfsd4: fix minor memory leak (diff)
parent		Merge branch 'linux-2.6.33' (diff)
Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs into for-2.6.34-incoming
Resolve merge conflict in fs/xfs/linux-2.6/xfs_export.c.
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/v9fs.c  33
-rw-r--r--  fs/9p/v9fs_vfs.h  1
-rw-r--r--  fs/9p/vfs_file.c  19
-rw-r--r--  fs/9p/vfs_inode.c  43
-rw-r--r--  fs/9p/vfs_super.c  3
-rw-r--r--  fs/affs/affs.h  2
-rw-r--r--  fs/affs/namei.c  7
-rw-r--r--  fs/affs/super.c  31
-rw-r--r--  fs/affs/symlink.c  7
-rw-r--r--  fs/befs/linuxvfs.c  1
-rw-r--r--  fs/bfs/inode.c  43
-rw-r--r--  fs/binfmt_aout.c  1
-rw-r--r--  fs/binfmt_elf.c  27
-rw-r--r--  fs/binfmt_elf_fdpic.c  3
-rw-r--r--  fs/binfmt_flat.c  1
-rw-r--r--  fs/binfmt_som.c  1
-rw-r--r--  fs/bio-integrity.c  3
-rw-r--r--  fs/bio.c  9
-rw-r--r--  fs/block_dev.c  7
-rw-r--r--  fs/btrfs/acl.c  13
-rw-r--r--  fs/btrfs/ctree.h  1
-rw-r--r--  fs/btrfs/disk-io.c  13
-rw-r--r--  fs/btrfs/extent-tree.c  40
-rw-r--r--  fs/btrfs/extent_io.c  3
-rw-r--r--  fs/btrfs/extent_map.c  14
-rw-r--r--  fs/btrfs/file.c  104
-rw-r--r--  fs/btrfs/inode.c  72
-rw-r--r--  fs/btrfs/ordered-data.c  2
-rw-r--r--  fs/btrfs/relocation.c  7
-rw-r--r--  fs/btrfs/super.c  9
-rw-r--r--  fs/btrfs/volumes.c  17
-rw-r--r--  fs/cachefiles/namei.c  12
-rw-r--r--  fs/cifs/CHANGES  4
-rw-r--r--  fs/cifs/cifs_dfs_ref.c  3
-rw-r--r--  fs/cifs/cifsfs.h  2
-rw-r--r--  fs/cifs/cifsglob.h  1
-rw-r--r--  fs/cifs/connect.c  30
-rw-r--r--  fs/cifs/inode.c  12
-rw-r--r--  fs/cifs/readdir.c  8
-rw-r--r--  fs/cifs/sess.c  11
-rw-r--r--  fs/compat_ioctl.c  12
-rw-r--r--  fs/configfs/symlink.c  4
-rw-r--r--  fs/debugfs/inode.c  11
-rw-r--r--  fs/ecryptfs/crypto.c  4
-rw-r--r--  fs/ecryptfs/file.c  17
-rw-r--r--  fs/ecryptfs/inode.c  158
-rw-r--r--  fs/ecryptfs/main.c  4
-rw-r--r--  fs/eventfd.c  89
-rw-r--r--  fs/exec.c  56
-rw-r--r--  fs/ext4/ext4.h  9
-rw-r--r--  fs/ext4/extents.c  21
-rw-r--r--  fs/ext4/inode.c  82
-rw-r--r--  fs/fcntl.c  102
-rw-r--r--  fs/file_table.c  1
-rw-r--r--  fs/fuse/file.c  3
-rw-r--r--  fs/gfs2/bmap.c  2
-rw-r--r--  fs/gfs2/glock.c  4
-rw-r--r--  fs/gfs2/glock.h  2
-rw-r--r--  fs/gfs2/incore.h  2
-rw-r--r--  fs/gfs2/lock_dlm.c  11
-rw-r--r--  fs/gfs2/ops_fstype.c  14
-rw-r--r--  fs/gfs2/ops_inode.c  3
-rw-r--r--  fs/gfs2/rgrp.c  8
-rw-r--r--  fs/gfs2/super.c  1
-rw-r--r--  fs/hppfs/hppfs.c  18
-rw-r--r--  fs/namei.c  43
-rw-r--r--  fs/namespace.c  14
-rw-r--r--  fs/nfs/direct.c  3
-rw-r--r--  fs/nfs/file.c  2
-rw-r--r--  fs/nfs/fscache.c  9
-rw-r--r--  fs/nfs/inode.c  4
-rw-r--r--  fs/nfs/mount_clnt.c  2
-rw-r--r--  fs/nfs/nfs2xdr.c  2
-rw-r--r--  fs/nfs/nfs4_fs.h  2
-rw-r--r--  fs/nfs/nfs4proc.c  78
-rw-r--r--  fs/nfs/nfs4state.c  2
-rw-r--r--  fs/nfs/nfs4xdr.c  6
-rw-r--r--  fs/nfs/pagelist.c  17
-rw-r--r--  fs/nfs/super.c  15
-rw-r--r--  fs/nfs/sysctl.c  2
-rw-r--r--  fs/nfs/write.c  6
-rw-r--r--  fs/nfsd/export.c  10
-rw-r--r--  fs/nfsd/vfs.c  3
-rw-r--r--  fs/nilfs2/segment.c  2
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c  2
-rw-r--r--  fs/notify/inotify/inotify_user.c  4
-rw-r--r--  fs/ocfs2/aops.c  4
-rw-r--r--  fs/ocfs2/buffer_head_io.c  2
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c  6
-rw-r--r--  fs/ocfs2/cluster/tcp.c  10
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h  4
-rw-r--r--  fs/ocfs2/dlm/dlmapi.h  2
-rw-r--r--  fs/ocfs2/dlm/dlmast.c  2
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c  2
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c  2
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c  2
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c  2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c  38
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c  147
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c  8
-rw-r--r--  fs/ocfs2/dlmglue.c  85
-rw-r--r--  fs/ocfs2/export.c  2
-rw-r--r--  fs/ocfs2/extent_map.c  2
-rw-r--r--  fs/ocfs2/file.c  18
-rw-r--r--  fs/ocfs2/inode.c  4
-rw-r--r--  fs/ocfs2/ioctl.c  14
-rw-r--r--  fs/ocfs2/journal.c  2
-rw-r--r--  fs/ocfs2/ocfs2.h  4
-rw-r--r--  fs/ocfs2/ocfs2_fs.h  11
-rw-r--r--  fs/ocfs2/refcounttree.c  12
-rw-r--r--  fs/ocfs2/stack_o2cb.c  12
-rw-r--r--  fs/ocfs2/super.c  2
-rw-r--r--  fs/ocfs2/symlink.c  10
-rw-r--r--  fs/ocfs2/uptodate.c  4
-rw-r--r--  fs/proc/base.c  25
-rw-r--r--  fs/ramfs/file-nommu.c  26
-rw-r--r--  fs/reiserfs/inode.c  2
-rw-r--r--  fs/reiserfs/journal.c  2
-rw-r--r--  fs/romfs/super.c  1
-rw-r--r--  fs/sysfs/inode.c  35
-rw-r--r--  fs/xfs/Makefile  2
-rw-r--r--  fs/xfs/linux-2.6/kmem.c  56
-rw-r--r--  fs/xfs/linux-2.6/kmem.h  21
-rw-r--r--  fs/xfs/linux-2.6/xfs_acl.c  11
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c  290
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h  52
-rw-r--r--  fs/xfs/linux-2.6/xfs_export.c  4
-rw-r--r--  fs/xfs/linux-2.6/xfs_fs_subr.c  2
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c  21
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.h  12
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c  4
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c  4
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c  62
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.h  3
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c  183
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c  329
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h  3
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.h  81
-rw-r--r--  fs/xfs/linux-2.6/xfs_xattr.c  27
-rw-r--r--  fs/xfs/quota/xfs_dquot.c  47
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c  99
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.h  4
-rw-r--r--  fs/xfs/quota/xfs_qm.c  40
-rw-r--r--  fs/xfs/quota/xfs_qm_bhv.c  2
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c  6
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c  49
-rw-r--r--  fs/xfs/xfs_acl.h  4
-rw-r--r--  fs/xfs/xfs_ag.h  16
-rw-r--r--  fs/xfs/xfs_alloc.c  96
-rw-r--r--  fs/xfs/xfs_alloc_btree.c  9
-rw-r--r--  fs/xfs/xfs_attr.c  52
-rw-r--r--  fs/xfs/xfs_attr.h  3
-rw-r--r--  fs/xfs/xfs_attr_leaf.c  30
-rw-r--r--  fs/xfs/xfs_attr_sf.h  2
-rw-r--r--  fs/xfs/xfs_bmap.c  17
-rw-r--r--  fs/xfs/xfs_bmap_btree.c  2
-rw-r--r--  fs/xfs/xfs_bmap_btree.h  1
-rw-r--r--  fs/xfs/xfs_btree.c  4
-rw-r--r--  fs/xfs/xfs_buf_item.c  72
-rw-r--r--  fs/xfs/xfs_da_btree.c  4
-rw-r--r--  fs/xfs/xfs_da_btree.h  5
-rw-r--r--  fs/xfs/xfs_dfrag.c  149
-rw-r--r--  fs/xfs/xfs_dfrag.h  3
-rw-r--r--  fs/xfs/xfs_dir2.c  8
-rw-r--r--  fs/xfs/xfs_dir2.h  4
-rw-r--r--  fs/xfs/xfs_dir2_block.c  9
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c  2
-rw-r--r--  fs/xfs/xfs_dir2_node.c  2
-rw-r--r--  fs/xfs/xfs_dir2_node.h  2
-rw-r--r--  fs/xfs/xfs_dir2_sf.c  2
-rw-r--r--  fs/xfs/xfs_extfree_item.c  4
-rw-r--r--  fs/xfs/xfs_filestream.c  42
-rw-r--r--  fs/xfs/xfs_filestream.h  28
-rw-r--r--  fs/xfs/xfs_fsops.c  42
-rw-r--r--  fs/xfs/xfs_ialloc.c  62
-rw-r--r--  fs/xfs/xfs_iget.c  11
-rw-r--r--  fs/xfs/xfs_inode.c  143
-rw-r--r--  fs/xfs/xfs_inode.h  11
-rw-r--r--  fs/xfs/xfs_inode_item.c  129
-rw-r--r--  fs/xfs/xfs_inode_item.h  6
-rw-r--r--  fs/xfs/xfs_itable.c  12
-rw-r--r--  fs/xfs/xfs_log.c  383
-rw-r--r--  fs/xfs/xfs_log.h  19
-rw-r--r--  fs/xfs/xfs_log_priv.h  5
-rw-r--r--  fs/xfs/xfs_log_recover.c  222
-rw-r--r--  fs/xfs/xfs_log_recover.h  23
-rw-r--r--  fs/xfs/xfs_mount.c  181
-rw-r--r--  fs/xfs/xfs_mount.h  27
-rw-r--r--  fs/xfs/xfs_mru_cache.c  2
-rw-r--r--  fs/xfs/xfs_mru_cache.h  1
-rw-r--r--  fs/xfs/xfs_quota.h  9
-rw-r--r--  fs/xfs/xfs_rtalloc.c  2
-rw-r--r--  fs/xfs/xfs_rw.c  155
-rw-r--r--  fs/xfs/xfs_rw.h  4
-rw-r--r--  fs/xfs/xfs_trans.c  7
-rw-r--r--  fs/xfs/xfs_trans.h  3
-rw-r--r--  fs/xfs/xfs_trans_ail.c  34
-rw-r--r--  fs/xfs/xfs_trans_buf.c  27
-rw-r--r--  fs/xfs/xfs_types.h  4
-rw-r--r--  fs/xfs/xfs_vnodeops.c  33
-rw-r--r--  fs/xfs/xfs_vnodeops.h  10
201 files changed, 3128 insertions, 2415 deletions
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index cf62b05e296a..7d6c2139891d 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
{
- char *options;
+ char *options, *tmp_options;
substring_t args[MAX_OPT_ARGS];
char *p;
int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
if (!opts)
return 0;
- options = kstrdup(opts, GFP_KERNEL);
- if (!options)
+ tmp_options = kstrdup(opts, GFP_KERNEL);
+ if (!tmp_options) {
+ ret = -ENOMEM;
goto fail_option_alloc;
+ }
+ options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
break;
case Opt_cache:
s = match_strdup(&args[0]);
- if (!s)
- goto fail_option_alloc;
+ if (!s) {
+ ret = -ENOMEM;
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "problem allocating copy of cache arg\n");
+ goto free_and_return;
+ }
if (strcmp(s, "loose") == 0)
v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_access:
s = match_strdup(&args[0]);
- if (!s)
- goto fail_option_alloc;
+ if (!s) {
+ ret = -ENOMEM;
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "problem allocating copy of access arg\n");
+ goto free_and_return;
+ }
v9ses->flags &= ~V9FS_ACCESS_MASK;
if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
continue;
}
}
- kfree(options);
- return ret;
+free_and_return:
+ kfree(tmp_options);
fail_option_alloc:
- P9_DPRINTK(P9_DEBUG_ERROR,
- "failed to allocate copy of option argument\n");
- return -ENOMEM;
+ return ret;
}
/**
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 3a7560e35865..ed835836e0dc 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
int v9fs_uflags2omode(int uflags, int extended);
ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
+void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3902bf43a088..74a0461a9ac0 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
return total;
}
+static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
+ int datasync)
+{
+ struct p9_fid *fid;
+ struct p9_wstat wstat;
+ int retval;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
+ dentry, datasync);
+
+ fid = filp->private_data;
+ v9fs_blank_wstat(&wstat);
+
+ retval = p9_client_wstat(fid, &wstat);
+ return retval;
+}
+
static const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = generic_file_readonly_mmap,
+ .fsync = v9fs_file_fsync,
};
const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = generic_file_readonly_mmap,
+ .fsync = v9fs_file_fsync,
};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 18f74ec4dce9..a407fa3388c0 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
*
*/
-static void
+void
v9fs_blank_wstat(struct p9_wstat *wstat)
{
wstat->type = ~0;
@@ -1001,44 +1001,6 @@ done:
}
/**
- * v9fs_vfs_readlink - read a symlink's location
- * @dentry: dentry for symlink
- * @buffer: buffer to load symlink location into
- * @buflen: length of buffer
- *
- */
-
-static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer,
- int buflen)
-{
- int retval;
- int ret;
- char *link = __getname();
-
- if (unlikely(!link))
- return -ENOMEM;
-
- if (buflen > PATH_MAX)
- buflen = PATH_MAX;
-
- P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
- dentry);
-
- retval = v9fs_readlink(dentry, link, buflen);
-
- if (retval > 0) {
- if ((ret = copy_to_user(buffer, link, retval)) != 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "problem copying to user: %d\n", ret);
- retval = ret;
- }
- }
-
- __putname(link);
- return retval;
-}
-
-/**
* v9fs_vfs_follow_link - follow a symlink path
* @dentry: dentry for symlink
* @nd: nameidata
@@ -1230,7 +1192,6 @@ static const struct inode_operations v9fs_dir_inode_operations_ext = {
.rmdir = v9fs_vfs_rmdir,
.mknod = v9fs_vfs_mknod,
.rename = v9fs_vfs_rename,
- .readlink = v9fs_vfs_readlink,
.getattr = v9fs_vfs_getattr,
.setattr = v9fs_vfs_setattr,
};
@@ -1253,7 +1214,7 @@ static const struct inode_operations v9fs_file_inode_operations = {
};
static const struct inode_operations v9fs_symlink_inode_operations = {
- .readlink = v9fs_vfs_readlink,
+ .readlink = generic_readlink,
.follow_link = v9fs_vfs_follow_link,
.put_link = v9fs_vfs_put_link,
.getattr = v9fs_vfs_getattr,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 14a86448572c..69357c0d9899 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -188,7 +188,8 @@ static void v9fs_kill_super(struct super_block *s)
P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
- v9fs_dentry_release(s->s_root); /* clunk root */
+ if (s->s_root)
+ v9fs_dentry_release(s->s_root); /* clunk root */
kill_anon_super(s);
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e511dc621a2e..0e40caaba456 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -106,8 +106,8 @@ struct affs_sb_info {
u32 s_last_bmap;
struct buffer_head *s_bmap_bh;
char *s_prefix; /* Prefix for volumes and assigns. */
- int s_prefix_len; /* Length of prefix. */
char s_volume[32]; /* Volume prefix for absolute symlinks. */
+ spinlock_t symlink_lock; /* protects the previous two */
};
#define SF_INTL 0x0001 /* International filesystem. */
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 960d336ec694..d70bbbac6b7b 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -341,10 +341,13 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
p = (char *)AFFS_HEAD(bh)->table;
lc = '/';
if (*symname == '/') {
+ struct affs_sb_info *sbi = AFFS_SB(sb);
while (*symname == '/')
symname++;
- while (AFFS_SB(sb)->s_volume[i]) /* Cannot overflow */
- *p++ = AFFS_SB(sb)->s_volume[i++];
+ spin_lock(&sbi->symlink_lock);
+ while (sbi->s_volume[i]) /* Cannot overflow */
+ *p++ = sbi->s_volume[i++];
+ spin_unlock(&sbi->symlink_lock);
}
while (i < maxlen && (c = *symname++)) {
if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 104fdcb3a7fc..d41e9673cd97 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -203,7 +203,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
switch (token) {
case Opt_bs:
if (match_int(&args[0], &n))
- return -EINVAL;
+ return 0;
if (n != 512 && n != 1024 && n != 2048
&& n != 4096) {
printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
@@ -213,7 +213,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
break;
case Opt_mode:
if (match_octal(&args[0], &option))
- return 1;
+ return 0;
*mode = option & 0777;
*mount_opts |= SF_SETMODE;
break;
@@ -221,8 +221,6 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
*mount_opts |= SF_MUFS;
break;
case Opt_prefix:
- /* Free any previous prefix */
- kfree(*prefix);
*prefix = match_strdup(&args[0]);
if (!*prefix)
return 0;
@@ -233,21 +231,21 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
break;
case Opt_reserved:
if (match_int(&args[0], reserved))
- return 1;
+ return 0;
break;
case Opt_root:
if (match_int(&args[0], root))
- return 1;
+ return 0;
break;
case Opt_setgid:
if (match_int(&args[0], &option))
- return 1;
+ return 0;
*gid = option;
*mount_opts |= SF_SETGID;
break;
case Opt_setuid:
if (match_int(&args[0], &option))
- return -EINVAL;
+ return 0;
*uid = option;
*mount_opts |= SF_SETUID;
break;
@@ -311,11 +309,14 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
sb->s_fs_info = sbi;
mutex_init(&sbi->s_bmlock);
+ spin_lock_init(&sbi->symlink_lock);
if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
&blocksize,&sbi->s_prefix,
sbi->s_volume, &mount_flags)) {
printk(KERN_ERR "AFFS: Error parsing options\n");
+ kfree(sbi->s_prefix);
+ kfree(sbi);
return -EINVAL;
}
/* N.B. after this point s_prefix must be released */
@@ -516,14 +517,18 @@ affs_remount(struct super_block *sb, int *flags, char *data)
unsigned long mount_flags;
int res = 0;
char *new_opts = kstrdup(data, GFP_KERNEL);
+ char volume[32];
+ char *prefix = NULL;
pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
*flags |= MS_NODIRATIME;
+ memcpy(volume, sbi->s_volume, 32);
if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
- &blocksize, &sbi->s_prefix, sbi->s_volume,
+ &blocksize, &prefix, volume,
&mount_flags)) {
+ kfree(prefix);
kfree(new_opts);
return -EINVAL;
}
@@ -534,6 +539,14 @@ affs_remount(struct super_block *sb, int *flags, char *data)
sbi->s_mode = mode;
sbi->s_uid = uid;
sbi->s_gid = gid;
+ /* protect against readers */
+ spin_lock(&sbi->symlink_lock);
+ if (prefix) {
+ kfree(sbi->s_prefix);
+ sbi->s_prefix = prefix;
+ }
+ memcpy(sbi->s_volume, volume, 32);
+ spin_unlock(&sbi->symlink_lock);
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
unlock_kernel();
diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
index 41782539c907..ee00f08c4f53 100644
--- a/fs/affs/symlink.c
+++ b/fs/affs/symlink.c
@@ -20,7 +20,6 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
int i, j;
char c;
char lc;
- char *pf;
pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino);
@@ -32,11 +31,15 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
j = 0;
lf = (struct slink_front *)bh->b_data;
lc = 0;
- pf = AFFS_SB(inode->i_sb)->s_prefix ? AFFS_SB(inode->i_sb)->s_prefix : "/";
if (strchr(lf->symname,':')) { /* Handle assign or volume name */
+ struct affs_sb_info *sbi = AFFS_SB(inode->i_sb);
+ char *pf;
+ spin_lock(&sbi->symlink_lock);
+ pf = sbi->s_prefix ? sbi->s_prefix : "/";
while (i < 1023 && (c = pf[i]))
link[i++] = c;
+ spin_unlock(&sbi->symlink_lock);
while (i < 1023 && lf->symname[j] != ':')
link[i++] = lf->symname[j++];
if (i < 1023)
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 33baf27fac78..34ddda888e63 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
brelse(bh);
unacquire_priv_sbp:
+ kfree(befs_sb->mount_opts.iocharset);
kfree(sb->s_fs_info);
unacquire_none:
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 6f60336c6628..8f3d9fd89604 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -353,35 +353,35 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
struct inode *inode;
unsigned i, imap_len;
struct bfs_sb_info *info;
- long ret = -EINVAL;
+ int ret = -EINVAL;
unsigned long i_sblock, i_eblock, i_eoff, s_size;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ mutex_init(&info->bfs_lock);
s->s_fs_info = info;
sb_set_blocksize(s, BFS_BSIZE);
- bh = sb_bread(s, 0);
- if(!bh)
+ info->si_sbh = sb_bread(s, 0);
+ if (!info->si_sbh)
goto out;
- bfs_sb = (struct bfs_super_block *)bh->b_data;
+ bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data;
if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
if (!silent)
printf("No BFS filesystem on %s (magic=%08x)\n",
s->s_id, le32_to_cpu(bfs_sb->s_magic));
- goto out;
+ goto out1;
}
if (BFS_UNCLEAN(bfs_sb, s) && !silent)
printf("%s is unclean, continuing\n", s->s_id);
s->s_magic = BFS_MAGIC;
- info->si_sbh = bh;
if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
printf("Superblock is corrupted\n");
- goto out;
+ goto out1;
}
info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) /
@@ -390,7 +390,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
imap_len = (info->si_lasti / 8) + 1;
info->si_imap = kzalloc(imap_len, GFP_KERNEL);
if (!info->si_imap)
- goto out;
+ goto out1;
for (i = 0; i < BFS_ROOT_INO; i++)
set_bit(i, info->si_imap);
@@ -398,15 +398,13 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
inode = bfs_iget(s, BFS_ROOT_INO);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
- kfree(info->si_imap);
- goto out;
+ goto out2;
}
s->s_root = d_alloc_root(inode);
if (!s->s_root) {
iput(inode);
ret = -ENOMEM;
- kfree(info->si_imap);
- goto out;
+ goto out2;
}
info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS;
@@ -419,10 +417,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
bh = sb_bread(s, info->si_blocks - 1);
if (!bh) {
printf("Last block not available: %lu\n", info->si_blocks - 1);
- iput(inode);
ret = -EIO;
- kfree(info->si_imap);
- goto out;
+ goto out3;
}
brelse(bh);
@@ -459,11 +455,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
printf("Inode 0x%08x corrupted\n", i);
brelse(bh);
- s->s_root = NULL;
- kfree(info->si_imap);
- kfree(info);
- s->s_fs_info = NULL;
- return -EIO;
+ ret = -EIO;
+ goto out3;
}
if (!di->i_ino) {
@@ -483,11 +476,17 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
s->s_dirt = 1;
}
dump_imap("read_super", s);
- mutex_init(&info->bfs_lock);
return 0;
+out3:
+ dput(s->s_root);
+ s->s_root = NULL;
+out2:
+ kfree(info->si_imap);
+out1:
+ brelse(info->si_sbh);
out:
- brelse(bh);
+ mutex_destroy(&info->bfs_lock);
kfree(info);
s->s_fs_info = NULL;
return ret;
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 346b69405363..fdd397099172 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -264,6 +264,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
#else
set_personality(PER_LINUX);
#endif
+ setup_new_exec(bprm);
current->mm->end_code = ex.a_text +
(current->mm->start_code = N_TXTADDR(ex));
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index edd90c49003c..fd5b2ea5d299 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -662,27 +662,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
goto out_free_interp;
- /*
- * The early SET_PERSONALITY here is so that the lookup
- * for the interpreter happens in the namespace of the
- * to-be-execed image. SET_PERSONALITY can select an
- * alternate root.
- *
- * However, SET_PERSONALITY is NOT allowed to switch
- * this task into the new images's memory mapping
- * policy - that is, TASK_SIZE must still evaluate to
- * that which is appropriate to the execing application.
- * This is because exit_mmap() needs to have TASK_SIZE
- * evaluate to the size of the old image.
- *
- * So if (say) a 64-bit application is execing a 32-bit
- * application it is the architecture's responsibility
- * to defer changing the value of TASK_SIZE until the
- * switch really is going to happen - do this in
- * flush_thread(). - akpm
- */
- SET_PERSONALITY(loc->elf_ex);
-
interpreter = open_exec(elf_interpreter);
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
@@ -730,9 +709,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
/* Verify the interpreter has a valid arch */
if (!elf_check_arch(&loc->interp_elf_ex))
goto out_free_dentry;
- } else {
- /* Executables without an interpreter also need a personality */
- SET_PERSONALITY(loc->elf_ex);
}
/* Flush all traces of the currently running executable */
@@ -752,7 +728,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
current->flags |= PF_RANDOMIZE;
- arch_pick_mmap_layout(current->mm);
+
+ setup_new_exec(bprm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index c57d9ce5ff7e..18d77297ccc8 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -321,6 +321,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
set_personality(PER_LINUX_FDPIC);
if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
+
+ setup_new_exec(bprm);
+
set_binfmt(&elf_fdpic_format);
current->mm->start_code = 0;
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index d4a00ea1054c..42c6b4a54445 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -519,6 +519,7 @@ static int load_flat_file(struct linux_binprm * bprm,
/* OK, This is the point of no return */
set_personality(PER_LINUX_32BIT);
+ setup_new_exec(bprm);
}
/*
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index 2a9b5330cc5e..cc8560f6c9b0 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -227,6 +227,7 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
current->personality = PER_HPUX;
+ setup_new_exec(bprm);
/* Set the task size for HP-UX processes such that
* the gateway page is outside the address space.
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 49a34e7f7306..a16f29e888cd 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr)
static inline int use_bip_pool(unsigned int idx)
{
- if (idx == BIOVEC_NR_POOLS)
+ if (idx == BIOVEC_MAX_IDX)
return 1;
return 0;
@@ -95,6 +95,7 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
/* Use mempool if lower order alloc failed or max vecs were requested */
if (bip == NULL) {
+ idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */
bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
if (unlikely(bip == NULL)) {
diff --git a/fs/bio.c b/fs/bio.c
index 76e6713abf94..88094afc29ea 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
i = 0;
while (i < bio_slab_nr) {
- struct bio_slab *bslab = &bio_slabs[i];
+ bslab = &bio_slabs[i];
if (!bslab->slab && entry == -1)
entry = i;
@@ -542,13 +542,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
if (page == prev->bv_page &&
offset == prev->bv_offset + prev->bv_len) {
+ unsigned int prev_bv_len = prev->bv_len;
prev->bv_len += len;
if (q->merge_bvec_fn) {
struct bvec_merge_data bvm = {
+ /* prev_bvec is already charged in
+ bi_size, discharge it in order to
+ simulate merging updated prev_bvec
+ as new bvec. */
.bi_bdev = bio->bi_bdev,
.bi_sector = bio->bi_sector,
- .bi_size = bio->bi_size,
+ .bi_size = bio->bi_size - prev_bv_len,
.bi_rw = bio->bi_rw,
};
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 73d6a735b8f3..d11d0289f3d2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
if (!sb)
goto out;
if (sb->s_flags & MS_RDONLY) {
- deactivate_locked_super(sb);
+ sb->s_frozen = SB_FREEZE_TRANS;
+ up_write(&sb->s_umount);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return sb;
}
@@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
BUG_ON(sb->s_bdev != bdev);
down_write(&sb->s_umount);
if (sb->s_flags & MS_RDONLY)
- goto out_deactivate;
+ goto out_unfrozen;
if (sb->s_op->unfreeze_fs) {
error = sb->s_op->unfreeze_fs(sb);
@@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
}
}
+out_unfrozen:
sb->s_frozen = SB_UNFROZEN;
smp_wmb();
wake_up(&sb->s_wait_unfrozen);
-out_deactivate:
if (sb)
deactivate_locked_super(sb);
out_unlock:
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 2e9e69987a82..6df6d6ed74fd 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -112,12 +112,14 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
switch (type) {
case ACL_TYPE_ACCESS:
mode = inode->i_mode;
- ret = posix_acl_equiv_mode(acl, &mode);
- if (ret < 0)
- return ret;
- ret = 0;
- inode->i_mode = mode;
name = POSIX_ACL_XATTR_ACCESS;
+ if (acl) {
+ ret = posix_acl_equiv_mode(acl, &mode);
+ if (ret < 0)
+ return ret;
+ inode->i_mode = mode;
+ }
+ ret = 0;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
@@ -242,6 +244,7 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
ACL_TYPE_ACCESS);
}
}
+ posix_acl_release(clone);
}
failed:
posix_acl_release(acl);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9f806dd04c27..2aa8ec6a0981 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1161,6 +1161,7 @@ struct btrfs_root {
#define BTRFS_MOUNT_SSD_SPREAD (1 << 8)
#define BTRFS_MOUNT_NOSSD (1 << 9)
#define BTRFS_MOUNT_DISCARD (1 << 10)
+#define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 009e3bd18f23..2b59201b955c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_recover_relocation(tree_root);
- BUG_ON(ret);
+ if (ret < 0) {
+ printk(KERN_WARNING
+ "btrfs: failed to recover relocation\n");
+ err = -EINVAL;
+ goto fail_trans_kthread;
+ }
}
location.objectid = BTRFS_FS_TREE_OBJECTID;
@@ -1993,6 +1998,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
if (!fs_info->fs_root)
goto fail_trans_kthread;
+ if (!(sb->s_flags & MS_RDONLY)) {
+ down_read(&fs_info->cleanup_work_sem);
+ btrfs_orphan_cleanup(fs_info->fs_root);
+ up_read(&fs_info->cleanup_work_sem);
+ }
+
return tree_root;
fail_trans_kthread:
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 56e50137d0e6..559f72489b3b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -83,6 +83,17 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
return (cache->flags & bits) == bits;
}
+void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+{
+ atomic_inc(&cache->count);
+}
+
+void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
+{
+ if (atomic_dec_and_test(&cache->count))
+ kfree(cache);
+}
+
/*
* this adds the block group to the fs_info rb tree for the block group
* cache
@@ -156,7 +167,7 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
}
}
if (ret)
- atomic_inc(&ret->count);
+ btrfs_get_block_group(ret);
spin_unlock(&info->block_group_cache_lock);
return ret;
@@ -407,6 +418,8 @@ err:
put_caching_control(caching_ctl);
atomic_dec(&block_group->space_info->caching_threads);
+ btrfs_put_block_group(block_group);
+
return 0;
}
@@ -447,6 +460,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
up_write(&fs_info->extent_commit_sem);
atomic_inc(&cache->space_info->caching_threads);
+ btrfs_get_block_group(cache);
tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
cache->key.objectid);
@@ -486,12 +500,6 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
return cache;
}
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
-{
- if (atomic_dec_and_test(&cache->count))
- kfree(cache);
-}
-
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
u64 flags)
{
@@ -2582,7 +2590,7 @@ next_block_group(struct btrfs_root *root,
if (node) {
cache = rb_entry(node, struct btrfs_block_group_cache,
cache_node);
- atomic_inc(&cache->count);
+ btrfs_get_block_group(cache);
} else
cache = NULL;
spin_unlock(&root->fs_info->block_group_cache_lock);
@@ -4227,7 +4235,7 @@ search:
u64 offset;
int cached;
- atomic_inc(&block_group->count);
+ btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;
have_block_group:
@@ -4315,7 +4323,7 @@ have_block_group:
btrfs_put_block_group(block_group);
block_group = last_ptr->block_group;
- atomic_inc(&block_group->count);
+ btrfs_get_block_group(block_group);
spin_unlock(&last_ptr->lock);
spin_unlock(&last_ptr->refill_lock);
@@ -5394,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
int ret;
while (level >= 0) {
- if (path->slots[level] >=
- btrfs_header_nritems(path->nodes[level]))
- break;
-
ret = walk_down_proc(trans, root, path, wc, lookup_info);
if (ret > 0)
break;
@@ -5405,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
if (level == 0)
break;
+ if (path->slots[level] >=
+ btrfs_header_nritems(path->nodes[level]))
+ break;
+
ret = do_walk_down(trans, root, path, wc, &lookup_info);
if (ret > 0) {
path->slots[level]++;
@@ -7395,9 +7403,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
wait_block_group_cache_done(block_group);
btrfs_remove_free_space_cache(block_group);
-
- WARN_ON(atomic_read(&block_group->count) != 1);
- kfree(block_group);
+ btrfs_put_block_group(block_group);
spin_lock(&info->block_group_cache_lock);
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 96577e8bf9fd..b177ed319612 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
spin_unlock(&tree->buffer_lock);
goto free_eb;
}
- spin_unlock(&tree->buffer_lock);
-
/* add one reference for the tree */
atomic_inc(&eb->refs);
+ spin_unlock(&tree->buffer_lock);
return eb;
free_eb:
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 46bea0f4dc7b..428fcac45f90 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -155,20 +155,6 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
return NULL;
}
-/*
- * look for an offset in the tree, and if it can't be found, return
- * the first offset we can find smaller than 'offset'.
- */
-static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
-{
- struct rb_node *prev;
- struct rb_node *ret;
- ret = __tree_search(root, offset, &prev, NULL);
- if (!ret)
- return prev;
- return ret;
-}
-
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index feaa13b105d9..6ed434ac037f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -506,7 +506,8 @@ next_slot:
}
static int extent_mergeable(struct extent_buffer *leaf, int slot,
- u64 objectid, u64 bytenr, u64 *start, u64 *end)
+ u64 objectid, u64 bytenr, u64 orig_offset,
+ u64 *start, u64 *end)
{
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
@@ -522,6 +523,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
+ btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
@@ -561,6 +563,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
u64 split;
int del_nr = 0;
int del_slot = 0;
+ int recow;
int ret;
btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -568,6 +571,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
BUG_ON(!path);
again:
+ recow = 0;
split = start;
key.objectid = inode->i_ino;
key.type = BTRFS_EXTENT_DATA_KEY;
@@ -591,12 +595,60 @@ again:
bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
+ memcpy(&new_key, &key, sizeof(new_key));
+
+ if (start == key.offset && end < extent_end) {
+ other_start = 0;
+ other_end = start;
+ if (extent_mergeable(leaf, path->slots[0] - 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ new_key.offset = end;
+ btrfs_set_item_key_safe(trans, root, path, &new_key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - end);
+ btrfs_set_file_extent_offset(leaf, fi,
+ end - orig_offset);
+ fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ end - other_start);
+ btrfs_mark_buffer_dirty(leaf);
+ goto out;
+ }
+ }
+
+ if (start > key.offset && end == extent_end) {
+ other_start = end;
+ other_end = 0;
+ if (extent_mergeable(leaf, path->slots[0] + 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ start - key.offset);
+ path->slots[0]++;
+ new_key.offset = start;
+ btrfs_set_item_key_safe(trans, root, path, &new_key);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ other_end - start);
+ btrfs_set_file_extent_offset(leaf, fi,
+ start - orig_offset);
+ btrfs_mark_buffer_dirty(leaf);
+ goto out;
+ }
+ }
while (start > key.offset || end < extent_end) {
if (key.offset == start)
split = end;
- memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = split;
ret = btrfs_duplicate_item(trans, root, path, &new_key);
if (ret == -EAGAIN) {
@@ -631,15 +683,18 @@ again:
path->slots[0]--;
extent_end = end;
}
+ recow = 1;
}
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
-
other_start = end;
other_end = 0;
- if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
+ if (extent_mergeable(leaf, path->slots[0] + 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ if (recow) {
+ btrfs_release_path(root, path);
+ goto again;
+ }
extent_end = other_end;
del_slot = path->slots[0] + 1;
del_nr++;
@@ -650,8 +705,13 @@ again:
}
other_start = 0;
other_end = start;
- if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
+ if (extent_mergeable(leaf, path->slots[0] - 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ if (recow) {
+ btrfs_release_path(root, path);
+ goto again;
+ }
key.offset = other_start;
del_slot = path->slots[0];
del_nr++;
@@ -661,21 +721,23 @@ again:
BUG_ON(ret);
}
if (del_nr == 0) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
btrfs_mark_buffer_dirty(leaf);
- goto out;
- }
-
- fi = btrfs_item_ptr(leaf, del_slot - 1,
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - key.offset);
- btrfs_mark_buffer_dirty(leaf);
+ } else {
+ fi = btrfs_item_ptr(leaf, del_slot - 1,
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - key.offset);
+ btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
- BUG_ON(ret);
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ BUG_ON(ret);
+ }
out:
btrfs_free_path(path);
return 0;
@@ -1073,7 +1135,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
}
mutex_lock(&dentry->d_inode->i_mutex);
out:
- return ret > 0 ? EIO : ret;
+ return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5440bab23635..4deb280f8969 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -483,7 +483,8 @@ again:
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
- BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
+ if (!btrfs_test_opt(root, FORCE_COMPRESS))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
if (will_compress) {
*num_added += 1;
@@ -1680,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
* before we start the transaction. It limits the amount of btree
* reads required while inside the transaction.
*/
-static noinline void reada_csum(struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_ordered_extent *ordered_extent)
-{
- struct btrfs_ordered_sum *sum;
- u64 bytenr;
-
- sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
- list);
- bytenr = sum->sums[0].bytenr;
-
- /*
- * we don't care about the results, the point of this search is
- * just to get the btree leaves into ram
- */
- btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
-}
-
/* as ordered data IO finishes, this gets called so we can finish
* an ordered extent if the range of bytes in the file it covers are
* fully written.
@@ -1708,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
struct btrfs_trans_handle *trans;
struct btrfs_ordered_extent *ordered_extent = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_path *path;
int compressed = 0;
int ret;
@@ -1716,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
if (!ret)
return 0;
- /*
- * before we join the transaction, try to do some of our IO.
- * This will limit the amount of IO that we have to do with
- * the transaction running. We're unlikely to need to do any
- * IO if the file extents are new, the disk_i_size checks
- * covers the most common case.
- */
- if (start < BTRFS_I(inode)->disk_i_size) {
- path = btrfs_alloc_path();
- if (path) {
- ret = btrfs_lookup_file_extent(NULL, root, path,
- inode->i_ino,
- start, 0);
- ordered_extent = btrfs_lookup_ordered_extent(inode,
- start);
- if (!list_empty(&ordered_extent->list)) {
- btrfs_release_path(root, path);
- reada_csum(root, path, ordered_extent);
- }
- btrfs_free_path(path);
- }
- }
-
- if (!ordered_extent)
- ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+ ordered_extent = btrfs_lookup_ordered_extent(inode, start);
BUG_ON(!ordered_extent);
+
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list));
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -3995,7 +3954,11 @@ skip:
/* Reached end of directory/root. Bump pos past the last item. */
if (key_type == BTRFS_DIR_INDEX_KEY)
- filp->f_pos = INT_LIMIT(off_t);
+ /*
+ * 32-bit glibc will use getdents64, but then strtol -
+ * so the last number we can serve is this.
+ */
+ filp->f_pos = 0x7fffffff;
else
filp->f_pos++;
nopos:
@@ -5789,7 +5752,7 @@ out_fail:
}
static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
- u64 alloc_hint, int mode)
+ u64 alloc_hint, int mode, loff_t actual_len)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5798,6 +5761,7 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
u64 cur_offset = start;
u64 num_bytes = end - start;
int ret = 0;
+ u64 i_size;
while (num_bytes > 0) {
alloc_size = min(num_bytes, root->fs_info->max_extent);
@@ -5835,9 +5799,15 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
inode->i_ctime = CURRENT_TIME;
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- cur_offset > inode->i_size) {
- i_size_write(inode, cur_offset);
- btrfs_ordered_update_i_size(inode, cur_offset, NULL);
+ (actual_len > inode->i_size) &&
+ (cur_offset > inode->i_size)) {
+
+ if (cur_offset > actual_len)
+ i_size = actual_len;
+ else
+ i_size = cur_offset;
+ i_size_write(inode, i_size);
+ btrfs_ordered_update_i_size(inode, i_size, NULL);
}
ret = btrfs_update_inode(trans, root, inode);
@@ -5930,7 +5900,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
ret = prealloc_file_range(inode,
cur_offset, last_byte,
- alloc_hint, mode);
+ alloc_hint, mode, offset+len);
if (ret < 0) {
free_extent_map(em);
break;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b10a49d4bc6a..5c2a9e78a949 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -626,6 +626,8 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
if (ordered)
offset = entry_end(ordered);
+ else
+ offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
mutex_lock(&tree->mutex);
disk_i_size = BTRFS_I(inode)->disk_i_size;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index a9728680eca8..ab7ab5318745 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3281,8 +3281,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
return -ENOMEM;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ kfree(cluster);
return -ENOMEM;
+ }
rc->extents_found = 0;
rc->extents_skipped = 0;
@@ -3762,7 +3764,8 @@ out:
BTRFS_DATA_RELOC_TREE_OBJECTID);
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
- btrfs_orphan_cleanup(fs_root);
+ else
+ btrfs_orphan_cleanup(fs_root);
}
return err;
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3f9b45704fcd..8a1ea6e64575 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -66,7 +66,8 @@ enum {
Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow,
Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl,
- Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit,
+ Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio,
+ Opt_flushoncommit,
Opt_discard, Opt_err,
};
@@ -82,6 +83,7 @@ static match_table_t tokens = {
{Opt_alloc_start, "alloc_start=%s"},
{Opt_thread_pool, "thread_pool=%d"},
{Opt_compress, "compress"},
+ {Opt_compress_force, "compress-force"},
{Opt_ssd, "ssd"},
{Opt_ssd_spread, "ssd_spread"},
{Opt_nossd, "nossd"},
@@ -173,6 +175,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
printk(KERN_INFO "btrfs: use compression\n");
btrfs_set_opt(info->mount_opt, COMPRESS);
break;
+ case Opt_compress_force:
+ printk(KERN_INFO "btrfs: forcing compression\n");
+ btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
+ btrfs_set_opt(info->mount_opt, COMPRESS);
+ break;
case Opt_ssd:
printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
btrfs_set_opt(info->mount_opt, SSD);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 198cff28766d..41ecbb2347f2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1135,7 +1135,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
root->fs_info->avail_metadata_alloc_bits;
if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
- root->fs_info->fs_devices->rw_devices <= 4) {
+ root->fs_info->fs_devices->num_devices <= 4) {
printk(KERN_ERR "btrfs: unable to go below four devices "
"on raid10\n");
ret = -EINVAL;
@@ -1143,7 +1143,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
}
if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
- root->fs_info->fs_devices->rw_devices <= 2) {
+ root->fs_info->fs_devices->num_devices <= 2) {
printk(KERN_ERR "btrfs: unable to go below two "
"devices on raid1\n");
ret = -EINVAL;
@@ -1434,8 +1434,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
return -EINVAL;
bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
- if (!bdev)
- return -EIO;
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
if (root->fs_info->fs_devices->seeding) {
seeding_dev = 1;
@@ -2538,6 +2538,11 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
if (!em)
return 1;
+ if (btrfs_test_opt(root, DEGRADED)) {
+ free_extent_map(em);
+ return 0;
+ }
+
map = (struct map_lookup *)em->bdev;
for (i = 0; i < map->num_stripes; i++) {
if (!map->stripes[i].dev->writeable) {
@@ -2649,8 +2654,10 @@ again:
em = lookup_extent_mapping(em_tree, logical, *length);
read_unlock(&em_tree->lock);
- if (!em && unplug_page)
+ if (!em && unplug_page) {
+ kfree(multi);
return 0;
+ }
if (!em) {
printk(KERN_CRIT "unable to find logical %llu len %llu\n",
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 14ac4806e291..eeb4986ea7db 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -348,7 +348,17 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
dir = dget_parent(object->dentry);
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- ret = cachefiles_bury_object(cache, dir, object->dentry);
+
+ /* we need to check that our parent is _still_ our parent - it may have
+ * been renamed */
+ if (dir == object->dentry->d_parent) {
+ ret = cachefiles_bury_object(cache, dir, object->dentry);
+ } else {
+ /* it got moved, presumably by cachefilesd culling it, so it's
+ * no longer in the key path and we can ignore it */
+ mutex_unlock(&dir->d_inode->i_mutex);
+ ret = 0;
+ }
dput(dir);
_leave(" = %d", ret);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7b2600b380d7..49503d2edc7e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,7 @@
+Version 1.62
+------------
+Add sockopt=TCP_NODELAY mount option.
+
Version 1.61
------------
Fix append problem to Samba servers (files opened with O_APPEND could
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index fea9e898c4ba..b44ce0a0711c 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -269,7 +269,7 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
int err;
mntget(newmnt);
- err = do_add_mount(newmnt, &nd->path, nd->path.mnt->mnt_flags, mntlist);
+ err = do_add_mount(newmnt, &nd->path, nd->path.mnt->mnt_flags | MNT_SHRINKABLE, mntlist);
switch (err) {
case 0:
path_put(&nd->path);
@@ -371,7 +371,6 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
if (IS_ERR(mnt))
goto out_err;
- nd->path.mnt->mnt_flags |= MNT_SHRINKABLE;
rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list);
out:
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ac2b24c192f8..78c1b86d55f6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.61"
+#define CIFS_VERSION "1.62"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4b35f7ec0583..ed751bb657db 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
bool svlocal:1; /* local server or remote */
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
+ bool tcp_nodelay;
atomic_t inFlight; /* number of requests on the wire to server */
#ifdef CONFIG_CIFS_STATS2
atomic_t inSend; /* requests trying to send */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 3bbcaa716b3c..2e9e09ca0e30 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -98,7 +98,7 @@ struct smb_vol {
bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
unsigned int rsize;
unsigned int wsize;
- unsigned int sockopt;
+ bool sockopt_tcp_nodelay:1;
unsigned short int port;
char *prepath;
};
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
simple_strtoul(value, &value, 0);
}
} else if (strnicmp(data, "sockopt", 5) == 0) {
- if (value && *value) {
- vol->sockopt =
- simple_strtoul(value, &value, 0);
+ if (!value || !*value) {
+ cERROR(1, ("no socket option specified"));
+ continue;
+ } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
+ vol->sockopt_tcp_nodelay = 1;
}
} else if (strnicmp(data, "netbiosname", 4) == 0) {
if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
+ tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
atomic_set(&tcp_ses->inFlight, 0);
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
ipv4_connect(struct TCP_Server_Info *server)
{
int rc = 0;
+ int val;
bool connected = false;
__be16 orig_port = 0;
struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
socket->sk->sk_rcvbuf = 140 * 1024;
}
+ if (server->tcp_nodelay) {
+ val = 1;
+ rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof(val));
+ if (rc)
+ cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ }
+
cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
socket->sk->sk_sndbuf,
socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
ipv6_connect(struct TCP_Server_Info *server)
{
int rc = 0;
+ int val;
bool connected = false;
__be16 orig_port = 0;
struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
socket->sk->sk_sndtimeo = 5 * HZ;
+
+ if (server->tcp_nodelay) {
+ val = 1;
+ rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof(val));
+ if (rc)
+ cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ }
+
server->ssocket = socket;
return rc;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index cf18ee765590..e3fda978f481 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
CIFS_MOUNT_MAP_SPECIAL_CHR);
}
- if (!rc)
+ if (!rc) {
rc = inode_setattr(inode, attrs);
+
+ /* force revalidate when any of these times are set since some
+ of the fs types (eg ext3, fat) do not have fine enough
+ time granularity to match protocol, and we do not have a
+ a way (yet) to query the server fs's time granularity (and
+ whether it rounds times down).
+ */
+ if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
+ cifsInode->time = 0;
+ }
out:
kfree(args);
kfree(full_path);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f84062f9a985..c343b14ba2d3 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
cFYI(1, ("For %s", name->name));
+ if (parent->d_op && parent->d_op->d_hash)
+ parent->d_op->d_hash(parent, name);
+ else
+ name->hash = full_name_hash(name->name, name->len);
+
dentry = d_lookup(parent, name);
if (dentry) {
/* FIXME: check for inode number changes? */
@@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
min(len, max_len), nlt,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+ pqst->len -= nls_nullsize(nlt);
} else {
pqst->name = filename;
pqst->len = len;
}
- pqst->hash = full_name_hash(pqst->name, pqst->len);
-/* cFYI(1, ("filldir on %s",pqst->name)); */
return rc;
}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7085a6275c4c..aaa9c1c5a5bd 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* null user mount */
*bcc_ptr = 0;
*(bcc_ptr+1) = 0;
- } else { /* 300 should be long enough for any conceivable user name */
+ } else {
bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
- 300, nls_cp);
+ MAX_USERNAME_SIZE, nls_cp);
}
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* account for null termination */
@@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* copy user */
if (ses->userName == NULL) {
/* BB what about null user mounts - check that we do this BB */
- } else { /* 300 should be long enough for any conceivable user name */
- strncpy(bcc_ptr, ses->userName, 300);
+ } else {
+ strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
}
- /* BB improve check for overflow */
- bcc_ptr += strnlen(ses->userName, 300);
+ bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 332dd00f0894..0ca9ec4a79c3 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -301,6 +301,12 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
u32 data;
void __user *dxferp;
int err;
+ int interface_id;
+
+ if (get_user(interface_id, &sgio32->interface_id))
+ return -EFAULT;
+ if (interface_id != 'S')
+ return sys_ioctl(fd, cmd, (unsigned long)sgio32);
if (get_user(iovec_count, &sgio32->iovec_count))
return -EFAULT;
@@ -936,6 +942,7 @@ COMPATIBLE_IOCTL(TCSETSF)
COMPATIBLE_IOCTL(TIOCLINUX)
COMPATIBLE_IOCTL(TIOCSBRK)
COMPATIBLE_IOCTL(TIOCCBRK)
+COMPATIBLE_IOCTL(TIOCGSID)
COMPATIBLE_IOCTL(TIOCGICOUNT)
/* Little t */
COMPATIBLE_IOCTL(TIOCGETD)
@@ -1005,6 +1012,9 @@ COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
#endif
+/* Big V (don't complain on serial console) */
+IGNORE_IOCTL(VT_OPENQRY)
+IGNORE_IOCTL(VT_GETMODE)
/* Little p (/dev/rtc, /dev/envctrl, etc.) */
COMPATIBLE_IOCTL(RTC_AIE_ON)
COMPATIBLE_IOCTL(RTC_AIE_OFF)
@@ -1035,6 +1045,8 @@ COMPATIBLE_IOCTL(FIOQSIZE)
#ifdef CONFIG_BLOCK
/* loop */
IGNORE_IOCTL(LOOP_CLR_FD)
+/* md calls this on random blockdevs */
+IGNORE_IOCTL(RAID_VERSION)
/* SG stuff */
COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index c8afa6b1d91d..32a5f46b1157 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -121,8 +121,10 @@ static int get_target(const char *symname, struct path *path,
ret = -ENOENT;
path_put(path);
}
- } else
+ } else {
ret = -EPERM;
+ path_put(path);
+ }
}
return ret;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index b486169f42bf..274ac865bae8 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -160,15 +160,8 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
* block. A pointer to that is in the struct vfsmount that we
* have around.
*/
- if (!parent) {
- if (debugfs_mount && debugfs_mount->mnt_sb) {
- parent = debugfs_mount->mnt_sb->s_root;
- }
- }
- if (!parent) {
- pr_debug("debugfs: Ah! can not find a parent!\n");
- return -EFAULT;
- }
+ if (!parent)
+ parent = debugfs_mount->mnt_sb->s_root;
*dentry = NULL;
mutex_lock(&parent->d_inode->i_mutex);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index fbb6e5eed697..7cb0a59f4b9d 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1748,7 +1748,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
char *cipher_name, size_t *key_size)
{
char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
- char *full_alg_name;
+ char *full_alg_name = NULL;
int rc;
*key_tfm = NULL;
@@ -1763,7 +1763,6 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
if (rc)
goto out;
*key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
- kfree(full_alg_name);
if (IS_ERR(*key_tfm)) {
rc = PTR_ERR(*key_tfm);
printk(KERN_ERR "Unable to allocate crypto cipher with name "
@@ -1786,6 +1785,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
goto out;
}
out:
+ kfree(full_alg_name);
return rc;
}
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 9e944057001b..678172b61be2 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -158,7 +158,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
struct dentry *ecryptfs_dentry = file->f_path.dentry;
/* Private value of ecryptfs_dentry allocated in
* ecryptfs_lookup() */
- struct dentry *lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
+ struct dentry *lower_dentry;
struct ecryptfs_file_info *file_info;
mount_crypt_stat = &ecryptfs_superblock_to_private(
@@ -191,13 +191,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
| ECRYPTFS_ENCRYPTED);
}
mutex_unlock(&crypt_stat->cs_mutex);
- if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
- && !(file->f_flags & O_RDONLY)) {
- rc = -EPERM;
- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
- "file must hence be opened RO\n", __func__);
- goto out;
- }
if (!ecryptfs_inode_to_private(inode)->lower_file) {
rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
if (rc) {
@@ -208,6 +201,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
goto out;
}
}
+ if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
+ && !(file->f_flags & O_RDONLY)) {
+ rc = -EPERM;
+ printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
+ "file must hence be opened RO\n", __func__);
+ goto out;
+ }
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
@@ -299,7 +299,6 @@ static int ecryptfs_ioctl(struct inode *inode, struct file *file,
const struct file_operations ecryptfs_dir_fops = {
.readdir = ecryptfs_readdir,
.ioctl = ecryptfs_ioctl,
- .mmap = generic_file_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 429ca0b3ba08..4a430ab4115c 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -282,7 +282,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
goto out;
}
rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
- ecryptfs_dir_inode->i_sb, 1);
+ ecryptfs_dir_inode->i_sb,
+ ECRYPTFS_INTERPOSE_FLAG_D_ADD);
if (rc) {
printk(KERN_ERR "%s: Error interposing; rc = [%d]\n",
__func__, rc);
@@ -463,9 +464,6 @@ out_lock:
unlock_dir(lower_dir_dentry);
dput(lower_new_dentry);
dput(lower_old_dentry);
- d_drop(lower_old_dentry);
- d_drop(new_dentry);
- d_drop(old_dentry);
return rc;
}
@@ -614,6 +612,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
+ struct dentry *trap = NULL;
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
@@ -621,7 +620,17 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
dget(lower_new_dentry);
lower_old_dir_dentry = dget_parent(lower_old_dentry);
lower_new_dir_dentry = dget_parent(lower_new_dentry);
- lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ /* source should not be ancestor of target */
+ if (trap == lower_old_dentry) {
+ rc = -EINVAL;
+ goto out_lock;
+ }
+ /* target should not be ancestor of source */
+ if (trap == lower_new_dentry) {
+ rc = -ENOTEMPTY;
+ goto out_lock;
+ }
rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
lower_new_dir_dentry->d_inode, lower_new_dentry);
if (rc)
@@ -715,31 +724,31 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
/* Released in ecryptfs_put_link(); only release here on error */
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
- rc = -ENOMEM;
+ buf = ERR_PTR(-ENOMEM);
goto out;
}
old_fs = get_fs();
set_fs(get_ds());
rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
set_fs(old_fs);
- if (rc < 0)
- goto out_free;
- else
+ if (rc < 0) {
+ kfree(buf);
+ buf = ERR_PTR(rc);
+ } else
buf[rc] = '\0';
- rc = 0;
- nd_set_link(nd, buf);
- goto out;
-out_free:
- kfree(buf);
out:
- return ERR_PTR(rc);
+ nd_set_link(nd, buf);
+ return NULL;
}
static void
ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
{
- /* Free the char* */
- kfree(nd_get_link(nd));
+ char *buf = nd_get_link(nd);
+ if (!IS_ERR(buf)) {
+ /* Free the char* */
+ kfree(buf);
+ }
}
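As an aside, the rework above follows the follow_link()/put_link() convention in which the link body, or an ERR_PTR carrying the error, is stashed with nd_set_link() and freed only when it is not an error pointer. A minimal sketch of that convention; the examplefs_* names and examplefs_read_target() are hypothetical:

#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/limits.h>

extern int examplefs_read_target(struct dentry *dentry, char *buf, int buflen); /* hypothetical */

static void *examplefs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);

	if (!buf) {
		buf = ERR_PTR(-ENOMEM);		/* error travels as the "link body" */
	} else if (examplefs_read_target(dentry, buf, PATH_MAX) < 0) {
		kfree(buf);
		buf = ERR_PTR(-EIO);
	}
	nd_set_link(nd, buf);			/* valid string or ERR_PTR */
	return NULL;				/* no cookie needed */
}

static void examplefs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	char *buf = nd_get_link(nd);

	if (!IS_ERR(buf))			/* free only what follow_link allocated */
		kfree(buf);
}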
/**
@@ -772,18 +781,23 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
}
/**
- * ecryptfs_truncate
+ * truncate_upper
* @dentry: The ecryptfs layer dentry
- * @new_length: The length to expand the file to
+ * @ia: Address of the ecryptfs inode's attributes
+ * @lower_ia: Address of the lower inode's attributes
*
* Function to handle truncations modifying the size of the file. Note
* that the file sizes are interpolated. When expanding, we are simply
- * writing strings of 0's out. When truncating, we need to modify the
- * underlying file size according to the page index interpolations.
+ * writing strings of 0's out. When truncating, we truncate the upper
+ * inode and update the lower_ia according to the page index
+ * interpolations. If ATTR_SIZE is set in lower_ia->ia_valid upon return,
+ * the caller must use lower_ia in a call to notify_change() to perform
+ * the truncation of the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
+static int truncate_upper(struct dentry *dentry, struct iattr *ia,
+ struct iattr *lower_ia)
{
int rc = 0;
struct inode *inode = dentry->d_inode;
@@ -794,8 +808,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
loff_t lower_size_before_truncate;
loff_t lower_size_after_truncate;
- if (unlikely((new_length == i_size)))
+ if (unlikely((ia->ia_size == i_size))) {
+ lower_ia->ia_valid &= ~ATTR_SIZE;
goto out;
+ }
crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
/* Set up a fake ecryptfs file, this is used to interface with
* the file in the underlying filesystem so that the
@@ -815,28 +831,30 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
&fake_ecryptfs_file,
ecryptfs_inode_to_private(dentry->d_inode)->lower_file);
/* Switch on growing or shrinking file */
- if (new_length > i_size) {
+ if (ia->ia_size > i_size) {
char zero[] = { 0x00 };
+ lower_ia->ia_valid &= ~ATTR_SIZE;
/* Write a single 0 at the last position of the file;
* this triggers code that will fill in 0's throughout
* the intermediate portion of the previous end of the
* file and the new end of the file */
rc = ecryptfs_write(&fake_ecryptfs_file, zero,
- (new_length - 1), 1);
- } else { /* new_length < i_size_read(inode) */
- /* We're chopping off all the pages down do the page
- * in which new_length is located. Fill in the end of
- * that page from (new_length & ~PAGE_CACHE_MASK) to
+ (ia->ia_size - 1), 1);
+ } else { /* ia->ia_size < i_size_read(inode) */
+ /* We're chopping off all the pages down to the page
+ * in which ia->ia_size is located. Fill in the end of
+ * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
* PAGE_CACHE_SIZE with zeros. */
size_t num_zeros = (PAGE_CACHE_SIZE
- - (new_length & ~PAGE_CACHE_MASK));
+ - (ia->ia_size & ~PAGE_CACHE_MASK));
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
- rc = vmtruncate(inode, new_length);
+ rc = vmtruncate(inode, ia->ia_size);
if (rc)
goto out_free;
- rc = vmtruncate(lower_dentry->d_inode, new_length);
+ lower_ia->ia_size = ia->ia_size;
+ lower_ia->ia_valid |= ATTR_SIZE;
goto out_free;
}
if (num_zeros) {
@@ -848,7 +866,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
goto out_free;
}
rc = ecryptfs_write(&fake_ecryptfs_file, zeros_virt,
- new_length, num_zeros);
+ ia->ia_size, num_zeros);
kfree(zeros_virt);
if (rc) {
printk(KERN_ERR "Error attempting to zero out "
@@ -857,7 +875,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
goto out_free;
}
}
- vmtruncate(inode, new_length);
+ vmtruncate(inode, ia->ia_size);
rc = ecryptfs_write_inode_size_to_metadata(inode);
if (rc) {
printk(KERN_ERR "Problem with "
@@ -870,10 +888,12 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
lower_size_before_truncate =
upper_size_to_lower_size(crypt_stat, i_size);
lower_size_after_truncate =
- upper_size_to_lower_size(crypt_stat, new_length);
- if (lower_size_after_truncate < lower_size_before_truncate)
- vmtruncate(lower_dentry->d_inode,
- lower_size_after_truncate);
+ upper_size_to_lower_size(crypt_stat, ia->ia_size);
+ if (lower_size_after_truncate < lower_size_before_truncate) {
+ lower_ia->ia_size = lower_size_after_truncate;
+ lower_ia->ia_valid |= ATTR_SIZE;
+ } else
+ lower_ia->ia_valid &= ~ATTR_SIZE;
}
out_free:
if (ecryptfs_file_to_private(&fake_ecryptfs_file))
@@ -883,6 +903,33 @@ out:
return rc;
}
+/**
+ * ecryptfs_truncate
+ * @dentry: The ecryptfs layer dentry
+ * @new_length: The length to expand the file to
+ *
+ * Simple function that handles the truncation of an eCryptfs inode and
+ * its corresponding lower inode.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
+{
+ struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = new_length };
+ struct iattr lower_ia = { .ia_valid = 0 };
+ int rc;
+
+ rc = truncate_upper(dentry, &ia, &lower_ia);
+ if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+
+ mutex_lock(&lower_dentry->d_inode->i_mutex);
+ rc = notify_change(lower_dentry, &lower_ia);
+ mutex_unlock(&lower_dentry->d_inode->i_mutex);
+ }
+ return rc;
+}
+
static int
ecryptfs_permission(struct inode *inode, int mask)
{
@@ -905,6 +952,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
{
int rc = 0;
struct dentry *lower_dentry;
+ struct iattr lower_ia;
struct inode *inode;
struct inode *lower_inode;
struct ecryptfs_crypt_stat *crypt_stat;
@@ -943,15 +991,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
}
}
mutex_unlock(&crypt_stat->cs_mutex);
+ memcpy(&lower_ia, ia, sizeof(lower_ia));
+ if (ia->ia_valid & ATTR_FILE)
+ lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
if (ia->ia_valid & ATTR_SIZE) {
- ecryptfs_printk(KERN_DEBUG,
- "ia->ia_valid = [0x%x] ATTR_SIZE" " = [0x%x]\n",
- ia->ia_valid, ATTR_SIZE);
- rc = ecryptfs_truncate(dentry, ia->ia_size);
- /* ecryptfs_truncate handles resizing of the lower file */
- ia->ia_valid &= ~ATTR_SIZE;
- ecryptfs_printk(KERN_DEBUG, "ia->ia_valid = [%x]\n",
- ia->ia_valid);
+ rc = truncate_upper(dentry, ia, &lower_ia);
if (rc < 0)
goto out;
}
@@ -960,17 +1004,32 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
* mode change is for clearing setuid/setgid bits. Allow lower fs
* to interpret this in its own way.
*/
- if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
- ia->ia_valid &= ~ATTR_MODE;
+ if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+ lower_ia.ia_valid &= ~ATTR_MODE;
mutex_lock(&lower_dentry->d_inode->i_mutex);
- rc = notify_change(lower_dentry, ia);
+ rc = notify_change(lower_dentry, &lower_ia);
mutex_unlock(&lower_dentry->d_inode->i_mutex);
out:
fsstack_copy_attr_all(inode, lower_inode);
return rc;
}
+int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct kstat lower_stat;
+ int rc;
+
+ rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
+ ecryptfs_dentry_to_lower(dentry), &lower_stat);
+ if (!rc) {
+ generic_fillattr(dentry->d_inode, stat);
+ stat->blocks = lower_stat.blocks;
+ }
+ return rc;
+}
+
int
ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
@@ -1100,6 +1159,7 @@ const struct inode_operations ecryptfs_dir_iops = {
const struct inode_operations ecryptfs_main_iops = {
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
+ .getattr = ecryptfs_getattr,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 567bc4b9f70a..ea2f92101dfe 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -585,8 +585,8 @@ out:
* with as much information as it can before needing
* the lower filesystem.
* ecryptfs_read_super(): this accesses the lower filesystem and uses
- * ecryptfs_interpolate to perform most of the linking
- * ecryptfs_interpolate(): links the lower filesystem into ecryptfs
+ * ecryptfs_interpose to perform most of the linking
+ * ecryptfs_interpose(): links the lower filesystem into ecryptfs (inode.c)
*/
static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data,
diff --git a/fs/eventfd.c b/fs/eventfd.c
index d26402ff06ea..7758cc382ef0 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -135,26 +135,71 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait)
return events;
}
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+{
+ *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
+ ctx->count -= *cnt;
+}
+
+/**
+ * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
+ * @ctx: [in] Pointer to eventfd context.
+ * @wait: [in] Wait queue to be removed.
+ * @cnt: [out] Pointer to the 64bit counter value.
+ *
+ * Returns zero if successful, or the following error codes:
+ *
+ * -EAGAIN : The operation would have blocked.
+ *
+ * This is used to atomically remove a wait queue entry from the eventfd wait
+ * queue head, and read/reset the counter value.
+ */
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+ __u64 *cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->wqh.lock, flags);
+ eventfd_ctx_do_read(ctx, cnt);
+ __remove_wait_queue(&ctx->wqh, wait);
+ if (*cnt != 0 && waitqueue_active(&ctx->wqh))
+ wake_up_locked_poll(&ctx->wqh, POLLOUT);
+ spin_unlock_irqrestore(&ctx->wqh.lock, flags);
+
+ return *cnt != 0 ? 0 : -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
+
+/**
+ * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
+ * @ctx: [in] Pointer to eventfd context.
+ * @no_wait: [in] Different from zero if the operation should not block.
+ * @cnt: [out] Pointer to the 64bit counter value.
+ *
+ * Returns zero if successful, or the following error codes:
+ *
+ * -EAGAIN : The operation would have blocked but @no_wait was nonzero.
+ * -ERESTARTSYS : A signal interrupted the wait operation.
+ *
+ * If @no_wait is zero, the function might sleep until the eventfd internal
+ * counter becomes greater than zero.
+ */
+ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
{
- struct eventfd_ctx *ctx = file->private_data;
ssize_t res;
- __u64 ucnt = 0;
DECLARE_WAITQUEUE(wait, current);
- if (count < sizeof(ucnt))
- return -EINVAL;
spin_lock_irq(&ctx->wqh.lock);
+ *cnt = 0;
res = -EAGAIN;
if (ctx->count > 0)
- res = sizeof(ucnt);
- else if (!(file->f_flags & O_NONBLOCK)) {
+ res = 0;
+ else if (!no_wait) {
__add_wait_queue(&ctx->wqh, &wait);
- for (res = 0;;) {
+ for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (ctx->count > 0) {
- res = sizeof(ucnt);
+ res = 0;
break;
}
if (signal_pending(current)) {
@@ -168,18 +213,32 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
}
- if (likely(res > 0)) {
- ucnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
- ctx->count -= ucnt;
+ if (likely(res == 0)) {
+ eventfd_ctx_do_read(ctx, cnt);
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, POLLOUT);
}
spin_unlock_irq(&ctx->wqh.lock);
- if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
- return -EFAULT;
return res;
}
+EXPORT_SYMBOL_GPL(eventfd_ctx_read);
+
+static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct eventfd_ctx *ctx = file->private_data;
+ ssize_t res;
+ __u64 cnt;
+
+ if (count < sizeof(cnt))
+ return -EINVAL;
+ res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt);
+ if (res < 0)
+ return res;
+
+ return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt);
+}
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
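As an aside, the two exported helpers above give in-kernel users a way to read an eventfd without going through the file read path; eventfd_ctx_remove_wait_queue() is aimed at poll-based consumers that have a wait queue entry registered on the eventfd. A minimal sketch of a non-blocking consumer, assuming the existing eventfd_ctx_fdget()/eventfd_ctx_put() helpers; consumer_drain() is a hypothetical name:

#include <linux/eventfd.h>
#include <linux/err.h>
#include <linux/kernel.h>

/* Sketch only: a hypothetical in-kernel consumer reading an eventfd. */
static int consumer_drain(int fd)
{
	struct eventfd_ctx *ctx;
	__u64 cnt;
	ssize_t ret;

	ctx = eventfd_ctx_fdget(fd);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* Non-blocking read: 0 on success, -EAGAIN if the counter was zero. */
	ret = eventfd_ctx_read(ctx, 1 /* no_wait */, &cnt);
	if (!ret)
		pr_info("eventfd counter was %llu\n", (unsigned long long)cnt);

	eventfd_ctx_put(ctx);
	return ret < 0 ? (int)ret : 0;
}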
diff --git a/fs/exec.c b/fs/exec.c
index 632b02e34ec7..cce6bbdbdbb1 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
struct vm_area_struct *prev = NULL;
unsigned long vm_flags;
unsigned long stack_base;
+ unsigned long stack_size;
+ unsigned long stack_expand;
+ unsigned long rlim_stack;
#ifdef CONFIG_STACK_GROWSUP
/* Limit stack size to 1GB */
@@ -627,10 +630,23 @@ int setup_arg_pages(struct linux_binprm *bprm,
goto out_unlock;
}
+ stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ stack_size = vma->vm_end - vma->vm_start;
+ /*
+ * Align this down to a page boundary as expand_stack
+ * will align it up.
+ */
+ rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
- stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ if (stack_size + stack_expand > rlim_stack)
+ stack_base = vma->vm_start + rlim_stack;
+ else
+ stack_base = vma->vm_end + stack_expand;
#else
- stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ if (stack_size + stack_expand > rlim_stack)
+ stack_base = vma->vm_end - rlim_stack;
+ else
+ stack_base = vma->vm_start - stack_expand;
#endif
ret = expand_stack(vma, stack_base);
if (ret)
@@ -941,9 +957,7 @@ void set_task_comm(struct task_struct *tsk, char *buf)
int flush_old_exec(struct linux_binprm * bprm)
{
- char * name;
- int i, ch, retval;
- char tcomm[sizeof(current->comm)];
+ int retval;
/*
* Make sure we have a private signal table and that
@@ -964,6 +978,25 @@ int flush_old_exec(struct linux_binprm * bprm)
bprm->mm = NULL; /* We're using it now */
+ current->flags &= ~PF_RANDOMIZE;
+ flush_thread();
+ current->personality &= ~bprm->per_clear;
+
+ return 0;
+
+out:
+ return retval;
+}
+EXPORT_SYMBOL(flush_old_exec);
+
+void setup_new_exec(struct linux_binprm * bprm)
+{
+ int i, ch;
+ char * name;
+ char tcomm[sizeof(current->comm)];
+
+ arch_pick_mmap_layout(current->mm);
+
/* This is the point of no return */
current->sas_ss_sp = current->sas_ss_size = 0;
@@ -985,9 +1018,6 @@ int flush_old_exec(struct linux_binprm * bprm)
tcomm[i] = '\0';
set_task_comm(current, tcomm);
- current->flags &= ~PF_RANDOMIZE;
- flush_thread();
-
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
* some architectures like powerpc
@@ -1003,8 +1033,6 @@ int flush_old_exec(struct linux_binprm * bprm)
set_dumpable(current->mm, suid_dumpable);
}
- current->personality &= ~bprm->per_clear;
-
/*
* Flush performance counters when crossing a
* security domain:
@@ -1019,14 +1047,8 @@ int flush_old_exec(struct linux_binprm * bprm)
flush_signal_handlers(current, 0);
flush_old_files(current->files);
-
- return 0;
-
-out:
- return retval;
}
-
-EXPORT_SYMBOL(flush_old_exec);
+EXPORT_SYMBOL(setup_new_exec);
/*
* Prepare credentials and lock ->cred_guard_mutex.
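As an aside, after this split a binary-format handler is expected to call flush_old_exec() first and setup_new_exec() once it has passed the point of no return. A minimal sketch of that ordering; examplefmt_load_binary() is hypothetical and the loader body is elided:

#include <linux/binfmts.h>
#include <linux/ptrace.h>

/* Sketch only: the call ordering a load_binary() handler now follows. */
static int examplefmt_load_binary(struct linux_binprm *bprm, struct pt_regs *regs)
{
	int retval;

	/* ... parse and sanity-check the header in bprm->buf ... */

	retval = flush_old_exec(bprm);	/* single-thread the task, install the new mm */
	if (retval)
		return retval;		/* non-zero: the caller aborts the exec */

	setup_new_exec(bprm);		/* point of no return: mmap layout, comm, dumpability */

	/* ... map segments, set up the stack, start_thread() ... */
	return 0;
}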
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index af7b62699ea9..874d169a193e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -361,14 +361,11 @@ struct ext4_new_group_data {
so set the magic i_delalloc_reserve_flag after taking the
inode allocation semaphore for */
#define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004
- /* Call ext4_da_update_reserve_space() after successfully
- allocating the blocks */
-#define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE 0x0008
/* caller is from the direct IO path, request to creation of an
uninitialized extents if not allocated, split the uninitialized
extent if blocks has been preallocated already*/
-#define EXT4_GET_BLOCKS_DIO 0x0010
-#define EXT4_GET_BLOCKS_CONVERT 0x0020
+#define EXT4_GET_BLOCKS_DIO 0x0008
+#define EXT4_GET_BLOCKS_CONVERT 0x0010
#define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\
EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
/* Convert extent to initialized after direct IO complete */
@@ -1443,6 +1440,8 @@ extern int ext4_block_truncate_page(handle_t *handle,
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern int flush_aio_dio_completed_IO(struct inode *inode);
+extern void ext4_da_update_reserve_space(struct inode *inode,
+ int used, int quota_claim);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7d7b74e94687..765a4826b118 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3132,7 +3132,19 @@ out:
unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
newblock + max_blocks,
allocated - max_blocks);
+ allocated = max_blocks;
}
+
+ /*
+ * If we have done fallocate with the offset that is already
+ * delayed allocated, we would have block reservation
+ * and quota reservation done in the delayed write path.
+ * But fallocate would have already updated quota and block
+ * count for this offset. So cancel these reservations
+ */
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+ ext4_da_update_reserve_space(inode, allocated, 0);
+
map_out:
set_buffer_mapped(bh_result);
out1:
@@ -3368,9 +3380,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
/* previous routine could use block we allocated */
newblock = ext_pblock(&newex);
allocated = ext4_ext_get_actual_len(&newex);
+ if (allocated > max_blocks)
+ allocated = max_blocks;
set_buffer_new(bh_result);
/*
+ * Update reserved blocks/metadata blocks after successful
+ * block allocation which had been deferred till now.
+ */
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+ ext4_da_update_reserve_space(inode, allocated, 1);
+
+ /*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an uninitialized extent.
*/
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c818972c8302..e11952404e02 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1053,11 +1053,12 @@ static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
* Called with i_data_sem down, which is important since we can call
* ext4_discard_preallocations() from here.
*/
-static void ext4_da_update_reserve_space(struct inode *inode, int used)
+void ext4_da_update_reserve_space(struct inode *inode,
+ int used, int quota_claim)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
- int mdb_free = 0;
+ int mdb_free = 0, allocated_meta_blocks = 0;
spin_lock(&ei->i_block_reservation_lock);
if (unlikely(used > ei->i_reserved_data_blocks)) {
@@ -1073,6 +1074,7 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
ei->i_reserved_data_blocks -= used;
used += ei->i_allocated_meta_blocks;
ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
+ allocated_meta_blocks = ei->i_allocated_meta_blocks;
ei->i_allocated_meta_blocks = 0;
percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);
@@ -1090,9 +1092,23 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/* Update quota subsystem */
- vfs_dq_claim_block(inode, used);
- if (mdb_free)
- vfs_dq_release_reservation_block(inode, mdb_free);
+ if (quota_claim) {
+ vfs_dq_claim_block(inode, used);
+ if (mdb_free)
+ vfs_dq_release_reservation_block(inode, mdb_free);
+ } else {
+ /*
+ * We did fallocate with an offset that is already delayed
+ * allocated. So on delayed allocated writeback we should
+ * not update the quota for allocated blocks. But then
+ * converting an fallocate region to initialized region would
+ * have caused a metadata allocation. So claim quota for
+ * that
+ */
+ if (allocated_meta_blocks)
+ vfs_dq_claim_block(inode, allocated_meta_blocks);
+ vfs_dq_release_reservation_block(inode, mdb_free + used);
+ }
/*
* If we have done all the pending block allocations and if
@@ -1292,18 +1308,20 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
*/
EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
}
- }
+ /*
+ * Update reserved blocks/metadata blocks after successful
+ * block allocation which had been deferred till now. We don't
+ * support fallocate for non extent files. So we can update
+ * reserve space here.
+ */
+ if ((retval > 0) &&
+ (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
+ ext4_da_update_reserve_space(inode, retval, 1);
+ }
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
EXT4_I(inode)->i_delalloc_reserved_flag = 0;
- /*
- * Update reserved blocks/metadata blocks after successful
- * block allocation which had been deferred till now.
- */
- if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
- ext4_da_update_reserve_space(inode, retval);
-
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && buffer_mapped(bh)) {
int ret = check_block_validity(inode, "file system "
@@ -1835,24 +1853,12 @@ repeat:
* later. Real quota accounting is done at pages writeout
* time.
*/
- if (vfs_dq_reserve_block(inode, md_needed + 1)) {
- /*
- * We tend to badly over-estimate the amount of
- * metadata blocks which are needed, so if we have
- * reserved any metadata blocks, try to force out the
- * inode and see if we have any better luck.
- */
- if (md_reserved && retries++ <= 3)
- goto retry;
+ if (vfs_dq_reserve_block(inode, md_needed + 1))
return -EDQUOT;
- }
if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
vfs_dq_release_reservation_block(inode, md_needed + 1);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
- retry:
- if (md_reserved)
- write_inode_now(inode, (retries == 3));
yield();
goto repeat;
}
@@ -2213,10 +2219,10 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
* variables are updated after the blocks have been allocated.
*/
new.b_state = 0;
- get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
- EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+ get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
if (mpd->b_state & (1 << BH_Delay))
- get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
+ get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
&new, get_blocks_flags);
if (blks < 0) {
@@ -3032,7 +3038,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- int ret, retries = 0;
+ int ret, retries = 0, quota_retries = 0;
struct page *page;
pgoff_t index;
unsigned from, to;
@@ -3091,6 +3097,22 @@ retry:
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
+
+ if ((ret == -EDQUOT) &&
+ EXT4_I(inode)->i_reserved_meta_blocks &&
+ (quota_retries++ < 3)) {
+ /*
+ * Since we often over-estimate the number of meta
+ * data blocks required, we may sometimes get a
+ * spurios out of quota error even though there would
+ * be enough space once we write the data blocks and
+ * find out how many meta data blocks were _really_
+ * required. So try forcing the inode write to see if
+ * that helps.
+ */
+ write_inode_now(inode, (quota_retries == 3));
+ goto retry;
+ }
out:
return ret;
}
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 2cf93ec40a67..97e01dc0d95f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -618,60 +618,90 @@ static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;
/*
- * fasync_helper() is used by almost all character device drivers
- * to set up the fasync queue. It returns negative on error, 0 if it did
- * no changes and positive if it added/deleted the entry.
+ * Remove a fasync entry. If successfully removed, return
+ * positive and clear the FASYNC flag. If no entry exists,
+ * do nothing and return 0.
+ *
+ * NOTE! It is very important that the FASYNC flag always
+ * match the state "is the filp on a fasync list".
+ *
+ * We always take the 'filp->f_lock', in since fasync_lock
+ * needs to be irq-safe.
*/
-int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
+static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
struct fasync_struct *fa, **fp;
- struct fasync_struct *new = NULL;
int result = 0;
- if (on) {
- new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
+ spin_lock(&filp->f_lock);
+ write_lock_irq(&fasync_lock);
+ for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
+ if (fa->fa_file != filp)
+ continue;
+ *fp = fa->fa_next;
+ kmem_cache_free(fasync_cache, fa);
+ filp->f_flags &= ~FASYNC;
+ result = 1;
+ break;
}
+ write_unlock_irq(&fasync_lock);
+ spin_unlock(&filp->f_lock);
+ return result;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if did nothing but change an existing one.
+ *
+ * NOTE! It is very important that the FASYNC flag always
+ * match the state "is the filp on a fasync list".
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+ struct fasync_struct *new, *fa, **fp;
+ int result = 0;
+
+ new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
- /*
- * We need to take f_lock first since it's not an IRQ-safe
- * lock.
- */
spin_lock(&filp->f_lock);
write_lock_irq(&fasync_lock);
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
- if (fa->fa_file == filp) {
- if(on) {
- fa->fa_fd = fd;
- kmem_cache_free(fasync_cache, new);
- } else {
- *fp = fa->fa_next;
- kmem_cache_free(fasync_cache, fa);
- result = 1;
- }
- goto out;
- }
+ if (fa->fa_file != filp)
+ continue;
+ fa->fa_fd = fd;
+ kmem_cache_free(fasync_cache, new);
+ goto out;
}
- if (on) {
- new->magic = FASYNC_MAGIC;
- new->fa_file = filp;
- new->fa_fd = fd;
- new->fa_next = *fapp;
- *fapp = new;
- result = 1;
- }
+ new->magic = FASYNC_MAGIC;
+ new->fa_file = filp;
+ new->fa_fd = fd;
+ new->fa_next = *fapp;
+ *fapp = new;
+ result = 1;
+ filp->f_flags |= FASYNC;
+
out:
- if (on)
- filp->f_flags |= FASYNC;
- else
- filp->f_flags &= ~FASYNC;
write_unlock_irq(&fasync_lock);
spin_unlock(&filp->f_lock);
return result;
}
+/*
+ * fasync_helper() is used by almost all character device drivers
+ * to set up the fasync queue, and for regular files by the file
+ * lease code. It returns negative on error, 0 if it did no changes
+ * and positive if it added/deleted the entry.
+ */
+int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
+{
+ if (!on)
+ return fasync_remove_entry(filp, fapp);
+ return fasync_add_entry(fd, filp, fapp);
+}
+
EXPORT_SYMBOL(fasync_helper);
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
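As an aside, fasync_helper() keeps its historical driver-facing contract (negative on error, zero for no change, positive when an entry was added or removed), so a character driver's ->fasync() handler remains a thin wrapper around it. A minimal sketch with hypothetical exampledev_* names:

#include <linux/fs.h>
#include <linux/signal.h>

/* Sketch only: typical driver-side use of fasync_helper()/kill_fasync(). */
static struct fasync_struct *exampledev_async_queue;

static int exampledev_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &exampledev_async_queue);
}

/* Called when new data arrives; signals SIGIO to registered listeners. */
static void exampledev_notify_readers(void)
{
	kill_fasync(&exampledev_async_queue, SIGIO, POLL_IN);
}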
diff --git a/fs/file_table.c b/fs/file_table.c
index 69652c5bd5f0..b98404b54383 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -253,6 +253,7 @@ void __fput(struct file *file)
if (file->f_op && file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
+ ima_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
cdev_put(inode->i_cdev);
fops_put(file->f_op);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c18913a777ae..a9f5e137f1d3 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -828,6 +828,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
if (!page)
break;
+ if (mapping_writably_mapped(mapping))
+ flush_dcache_page(page);
+
pagefault_disable();
tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
pagefault_enable();
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 6d47379e794b..583e823307ae 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
*ptr++ = cpu_to_be64(bn++);
break;
}
- } while (state != ALLOC_DATA);
+ } while ((state != ALLOC_DATA) || !dblock);
ip->i_height = height;
gfs2_add_inode_blocks(&ip->i_inode, alloced);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f455a03a09e2..f42663325931 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -769,6 +769,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (!gl)
return -ENOMEM;
+ atomic_inc(&sdp->sd_glock_disposal);
gl->gl_flags = 0;
gl->gl_name = name;
atomic_set(&gl->gl_ref, 1);
@@ -1538,6 +1539,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
up_write(&gfs2_umount_flush_sem);
msleep(10);
}
+ flush_workqueue(glock_workqueue);
+ wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
+ gfs2_dump_lockstate(sdp);
}
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 13f0bd228132..c0262faf4725 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -123,7 +123,7 @@ struct lm_lockops {
int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
void (*lm_unmount) (struct gfs2_sbd *sdp);
void (*lm_withdraw) (struct gfs2_sbd *sdp);
- void (*lm_put_lock) (struct kmem_cache *cachep, void *gl);
+ void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl);
unsigned int (*lm_lock) (struct gfs2_glock *gl,
unsigned int req_state, unsigned int flags);
void (*lm_cancel) (struct gfs2_glock *gl);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 4792200978c8..bc0ad158e6b4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -544,6 +544,8 @@ struct gfs2_sbd {
struct gfs2_holder sd_live_gh;
struct gfs2_glock *sd_rename_gl;
struct gfs2_glock *sd_trans_gl;
+ wait_queue_head_t sd_glock_wait;
+ atomic_t sd_glock_disposal;
/* Inode Stuff */
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 46df988323bc..0e5e0e7022e5 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -21,6 +21,7 @@ static void gdlm_ast(void *arg)
{
struct gfs2_glock *gl = arg;
unsigned ret = gl->gl_state;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
@@ -30,6 +31,8 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
kmem_cache_free(gfs2_glock_cachep, gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
return;
case -DLM_ECANCEL: /* Cancel while getting lock */
ret |= LM_OUT_CANCELED;
@@ -164,14 +167,16 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl,
return LM_OUT_ASYNC;
}
-static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr)
+static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
{
- struct gfs2_glock *gl = ptr;
- struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
if (gl->gl_lksb.sb_lkid == 0) {
kmem_cache_free(cachep, gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
return;
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index edfee24f3636..a86ed6381566 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -82,6 +82,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
gfs2_tune_init(&sdp->sd_tune);
+ init_waitqueue_head(&sdp->sd_glock_wait);
+ atomic_set(&sdp->sd_glock_disposal, 0);
spin_lock_init(&sdp->sd_statfs_spin);
spin_lock_init(&sdp->sd_rindex_spin);
@@ -723,7 +725,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
goto fail;
}
- error = -EINVAL;
+ error = -EUSERS;
if (!gfs2_jindex_size(sdp)) {
fs_err(sdp, "no journals!\n");
goto fail_jindex;
@@ -983,9 +985,17 @@ static const match_table_t nolock_tokens = {
{ Opt_err, NULL },
};
+static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ kmem_cache_free(cachep, gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
+}
+
static const struct lm_lockops nolock_ops = {
.lm_proto_name = "lock_nolock",
- .lm_put_lock = kmem_cache_free,
+ .lm_put_lock = nolock_put_lock,
.lm_tokens = &nolock_tokens,
};
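As an aside, the gfs2 hunks above add a simple drain pattern: every glock created bumps sd_glock_disposal, every free path decrements it and wakes sd_glock_wait, and teardown waits for the count to reach zero. A generic sketch of the same pattern, with hypothetical names:

#include <linux/wait.h>
#include <linux/slab.h>
#include <asm/atomic.h>

/* Sketch only: the create/free/drain accounting pattern used above. */
static atomic_t outstanding = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(drain_wq);

static void obj_create(void)
{
	atomic_inc(&outstanding);		/* one more object in flight */
	/* ... allocate and publish the object ... */
}

static void obj_free(void *obj)
{
	kfree(obj);
	if (atomic_dec_and_test(&outstanding))	/* last one out */
		wake_up(&drain_wq);
}

static void drain_all(void)
{
	/* ... trigger frees for every remaining object ... */
	wait_event(drain_wq, atomic_read(&outstanding) == 0);
}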
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 78f73ca1ef3e..84350e1be66d 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -1088,7 +1088,8 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
error = vfs_follow_link(nd, buf);
if (buf != array)
kfree(buf);
- }
+ } else
+ path_put(&nd->path);
return ERR_PTR(error);
}
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 0608f490c295..503b842f3ba2 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -591,11 +591,7 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
u64 rgrp_count = ip->i_disksize;
int error;
- if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) {
- gfs2_consist_inode(ip);
- return -EIO;
- }
-
+ do_div(rgrp_count, sizeof(struct gfs2_rindex));
clear_rgrpdi(sdp);
file_ra_state_init(&ra_state, inode->i_mapping);
@@ -915,7 +911,7 @@ void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
{
BUG_ON(ip->i_alloc != NULL);
- ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_KERNEL);
+ ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_NOFS);
return ip->i_alloc;
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c282ad41f3d1..b9dd3da22c0a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -21,6 +21,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
+#include <linux/wait.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index a5089a6dd67a..7239efc690d8 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -646,22 +646,27 @@ static const struct super_operations hppfs_sbops = {
static int hppfs_readlink(struct dentry *dentry, char __user *buffer,
int buflen)
{
- struct dentry *proc_dentry;
-
- proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+ struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
return proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer,
buflen);
}
static void *hppfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct dentry *proc_dentry;
-
- proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+ struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
return proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd);
}
+static void hppfs_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+
+ if (proc_dentry->d_inode->i_op->put_link)
+ proc_dentry->d_inode->i_op->put_link(proc_dentry, nd, cookie);
+}
+
static const struct inode_operations hppfs_dir_iops = {
.lookup = hppfs_lookup,
};
@@ -669,6 +674,7 @@ static const struct inode_operations hppfs_dir_iops = {
static const struct inode_operations hppfs_link_iops = {
.readlink = hppfs_readlink,
.follow_link = hppfs_follow_link,
+ .put_link = hppfs_put_link,
};
static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
diff --git a/fs/namei.c b/fs/namei.c
index b55440baf7ab..a4855af776a8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -561,6 +561,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
dget(dentry);
}
mntget(path->mnt);
+ nd->last_type = LAST_BIND;
cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(cookie);
if (!IS_ERR(cookie)) {
@@ -822,6 +823,17 @@ fail:
}
/*
+ * This is a temporary kludge to deal with "automount" symlinks; proper
+ * solution is to trigger them on follow_mount(), so that do_lookup()
+ * would DTRT. To be killed before 2.6.34-final.
+ */
+static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
+{
+ return inode && unlikely(inode->i_op->follow_link) &&
+ ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
+}
+
+/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
@@ -941,8 +953,7 @@ last_component:
if (err)
break;
inode = next.dentry->d_inode;
- if ((lookup_flags & LOOKUP_FOLLOW)
- && inode && inode->i_op->follow_link) {
+ if (follow_on_final(inode, lookup_flags)) {
err = do_follow_link(&next, nd);
if (err)
goto return_err;
@@ -1603,11 +1614,12 @@ struct file *do_filp_open(int dfd, const char *pathname,
struct file *filp;
struct nameidata nd;
int error;
- struct path path, save;
+ struct path path;
struct dentry *dir;
int count = 0;
int will_truncate;
int flag = open_to_namei_flags(open_flag);
+ int force_reval = 0;
/*
* O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
@@ -1619,7 +1631,7 @@ struct file *do_filp_open(int dfd, const char *pathname,
open_flag |= O_DSYNC;
if (!acc_mode)
- acc_mode = MAY_OPEN | ACC_MODE(flag);
+ acc_mode = MAY_OPEN | ACC_MODE(open_flag);
/* O_TRUNC implies we need access checks for write permissions */
if (flag & O_TRUNC)
@@ -1659,9 +1671,12 @@ struct file *do_filp_open(int dfd, const char *pathname,
/*
* Create - we need to know the parent.
*/
+reval:
error = path_init(dfd, pathname, LOOKUP_PARENT, &nd);
if (error)
return ERR_PTR(error);
+ if (force_reval)
+ nd.flags |= LOOKUP_REVAL;
error = path_walk(pathname, &nd);
if (error) {
if (nd.root.mnt)
@@ -1731,8 +1746,7 @@ do_last:
if (nd.root.mnt)
path_put(&nd.root);
if (!IS_ERR(filp)) {
- error = ima_path_check(&filp->f_path, filp->f_mode &
- (MAY_READ | MAY_WRITE | MAY_EXEC));
+ error = ima_file_check(filp, acc_mode);
if (error) {
fput(filp);
filp = ERR_PTR(error);
@@ -1792,8 +1806,7 @@ ok:
}
filp = nameidata_to_filp(&nd);
if (!IS_ERR(filp)) {
- error = ima_path_check(&filp->f_path, filp->f_mode &
- (MAY_READ | MAY_WRITE | MAY_EXEC));
+ error = ima_file_check(filp, acc_mode);
if (error) {
fput(filp);
filp = ERR_PTR(error);
@@ -1853,17 +1866,7 @@ do_link:
error = security_inode_follow_link(path.dentry, &nd);
if (error)
goto exit_dput;
- save = nd.path;
- path_get(&save);
error = __do_follow_link(&path, &nd);
- if (error == -ESTALE) {
- /* nd.path had been dropped */
- nd.path = save;
- path_get(&nd.path);
- nd.flags |= LOOKUP_REVAL;
- error = __do_follow_link(&path, &nd);
- }
- path_put(&save);
path_put(&path);
if (error) {
/* Does someone understand code flow here? Or it is only
@@ -1873,6 +1876,10 @@ do_link:
release_open_intent(&nd);
if (nd.root.mnt)
path_put(&nd.root);
+ if (error == -ESTALE && !force_reval) {
+ force_reval = 1;
+ goto reval;
+ }
return ERR_PTR(error);
}
nd.flags &= ~LOOKUP_PARENT;
diff --git a/fs/namespace.c b/fs/namespace.c
index 7d70d63ceb29..c768f733c8d6 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -965,10 +965,12 @@ EXPORT_SYMBOL(may_umount_tree);
int may_umount(struct vfsmount *mnt)
{
int ret = 1;
+ down_read(&namespace_sem);
spin_lock(&vfsmount_lock);
if (propagate_mount_busy(mnt, 2))
ret = 0;
spin_unlock(&vfsmount_lock);
+ up_read(&namespace_sem);
return ret;
}
@@ -1352,12 +1354,12 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
if (err)
goto out_cleanup_ids;
+ spin_lock(&vfsmount_lock);
+
if (IS_MNT_SHARED(dest_mnt)) {
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
set_mnt_shared(p);
}
-
- spin_lock(&vfsmount_lock);
if (parent_path) {
detach_mnt(source_mnt, parent_path);
attach_mnt(source_mnt, path);
@@ -1534,8 +1536,12 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
err = change_mount_flags(path->mnt, flags);
else
err = do_remount_sb(sb, flags, data, 0);
- if (!err)
+ if (!err) {
+ spin_lock(&vfsmount_lock);
+ mnt_flags |= path->mnt->mnt_flags & MNT_PNODE_MASK;
path->mnt->mnt_flags = mnt_flags;
+ spin_unlock(&vfsmount_lock);
+ }
up_write(&sb->s_umount);
if (!err) {
security_sb_post_remount(path->mnt, flags, data);
@@ -1665,6 +1671,8 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
{
int err;
+ mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD);
+
down_write(&namespace_sem);
/* Something was mounted here while we slept */
while (d_mountpoint(path->dentry) &&
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e1d415e97849..0d289823e856 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -342,6 +342,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->res.fattr = &data->fattr;
data->res.eof = 0;
data->res.count = bytes;
+ nfs_fattr_init(&data->fattr);
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
@@ -575,6 +576,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
NFS_PROTO(data->inode)->commit_setup(data, &msg);
@@ -766,6 +768,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->res.fattr = &data->fattr;
data->res.count = bytes;
data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
task_setup_data.task = &data->task;
task_setup_data.callback_data = data;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6b891328f332..63f2071d6445 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -486,6 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
{
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+ if (gfp & __GFP_WAIT)
+ nfs_wb_page(page->mapping->host, page);
/* If PagePrivate() is set, then the page is not freeable */
if (PagePrivate(page))
return 0;
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index fa588006588d..237874f1af23 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
*/
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
- struct nfs_inode *nfsi = NFS_I(page->mapping->host);
- struct fscache_cookie *cookie = nfsi->fscache;
-
- BUG_ON(!cookie);
-
if (PageFsCache(page)) {
+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+ struct fscache_cookie *cookie = nfsi->fscache;
+
+ BUG_ON(!cookie);
dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
cookie, page, nfsi);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index faa091865ad0..f141bde7756a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1261,8 +1261,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
+ umode_t newmode = inode->i_mode & S_IFMT;
+ newmode |= fattr->mode & S_IALLUGO;
+ inode->i_mode = newmode;
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
- inode->i_mode = fattr->mode;
}
} else if (server->caps & NFS_CAP_MODE)
invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 0adefc40cc89..59047f8d7d72 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -120,7 +120,7 @@ static struct {
{ .status = MNT3ERR_INVAL, .errno = -EINVAL, },
{ .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
{ .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
- { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, },
+ { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, },
};
struct mountres {
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5e078b222b4e..7bc2da8efd4a 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -699,7 +699,7 @@ static struct {
{ NFSERR_BAD_COOKIE, -EBADCOOKIE },
{ NFSERR_NOTSUPP, -ENOTSUPP },
{ NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -ESERVERFAULT },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
{ NFSERR_BADTYPE, -EBADTYPE },
{ NFSERR_JUKEBOX, -EJUKEBOX },
{ -1, -EIO }
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 865265bdca03..0c6fda33d66e 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -146,6 +146,7 @@ enum {
NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
+ NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
};
struct nfs4_state {
@@ -277,6 +278,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs4_schedule_state_recovery(struct nfs_client *);
extern void nfs4_schedule_state_manager(struct nfs_client *);
extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
+extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 198d51d17c13..375f0fae2c6a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -249,19 +249,15 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
if (state == NULL)
break;
nfs4_state_mark_reclaim_nograce(clp, state);
- case -NFS4ERR_STALE_CLIENTID:
+ goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_EXPIRED:
- nfs4_schedule_state_recovery(clp);
- ret = nfs4_wait_clnt_recover(clp);
- if (ret == 0)
- exception->retry = 1;
-#if !defined(CONFIG_NFS_V4_1)
- break;
-#else /* !defined(CONFIG_NFS_V4_1) */
- if (!nfs4_has_session(server->nfs_client))
+ if (state == NULL)
break;
- /* FALLTHROUGH */
+ nfs4_state_mark_reclaim_reboot(clp, state);
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_EXPIRED:
+ goto do_state_recovery;
+#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
@@ -274,7 +270,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
nfs4_schedule_state_recovery(clp);
exception->retry = 1;
break;
-#endif /* !defined(CONFIG_NFS_V4_1) */
+#endif /* defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
if (exception->timeout > HZ) {
/* We have retried a decent amount, time to
@@ -293,6 +289,12 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
}
/* We failed to handle the error */
return nfs4_map_errors(ret);
+do_state_recovery:
+ nfs4_schedule_state_recovery(clp);
+ ret = nfs4_wait_clnt_recover(clp);
+ if (ret == 0)
+ exception->retry = 1;
+ return ret;
}
@@ -1658,6 +1660,8 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
+ if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
+ set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
@@ -3422,15 +3426,14 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
if (state == NULL)
break;
nfs4_state_mark_reclaim_nograce(clp, state);
- case -NFS4ERR_STALE_CLIENTID:
+ goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
+ if (state == NULL)
+ break;
+ nfs4_state_mark_reclaim_reboot(clp, state);
+ case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_EXPIRED:
- rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
- nfs4_schedule_state_recovery(clp);
- if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
- rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
- task->tk_status = 0;
- return -EAGAIN;
+ goto do_state_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
@@ -3458,6 +3461,13 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
}
task->tk_status = nfs4_map_errors(task->tk_status);
return 0;
+do_state_recovery:
+ rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
+ nfs4_schedule_state_recovery(clp);
+ if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
+ rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
+ task->tk_status = 0;
+ return -EAGAIN;
}
static int
@@ -4088,6 +4098,28 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = {
.rpc_release = nfs4_lock_release,
};
+static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state *state = lsp->ls_state;
+
+ switch (error) {
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ if (new_lock_owner != 0 ||
+ (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+ nfs4_state_mark_reclaim_nograce(clp, state);
+ lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+ break;
+ case -NFS4ERR_STALE_STATEID:
+ if (new_lock_owner != 0 ||
+ (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+ nfs4_state_mark_reclaim_reboot(clp, state);
+ lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+ };
+}
+
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
struct nfs4_lockdata *data;
@@ -4126,6 +4158,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
+ if (ret)
+ nfs4_handle_setlk_error(data->server, data->lsp,
+ data->arg.new_lock_owner, ret);
} else
data->cancelled = 1;
rpc_put_task(task);
@@ -4181,8 +4216,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
{
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
- int status;
+ int status = -ENOLCK;
+ if ((fl_flags & FL_POSIX) &&
+ !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
+ goto out;
/* Is this a delegated open? */
status = nfs4_set_lock_state(state, request);
if (status != 0)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6d263ed79e92..c1e2733f4fa4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -901,7 +901,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
nfs4_schedule_state_manager(clp);
}
-static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
+int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e437fd6a819f..5cd5184b56db 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4631,7 +4631,7 @@ static int decode_sequence(struct xdr_stream *xdr,
* If the server returns different values for sessionID, slotID or
* sequence number, the server is looney tunes.
*/
- status = -ESERVERFAULT;
+ status = -EREMOTEIO;
if (memcmp(id.data, res->sr_session->sess_id.data,
NFS4_MAX_SESSIONID_LEN)) {
@@ -5774,7 +5774,7 @@ static struct {
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
{ NFS4ERR_BADTYPE, -EBADTYPE },
{ NFS4ERR_LOCKED, -EAGAIN },
{ NFS4ERR_SYMLINK, -ELOOP },
@@ -5801,7 +5801,7 @@ nfs4_stat_to_errno(int stat)
}
if (stat <= 10000 || stat > 10100) {
/* The server is looney tunes. */
- return -ESERVERFAULT;
+ return -EREMOTEIO;
}
/* If we cannot translate the error, the recovery routines should
* handle it.
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e2975939126a..a12c45b65dd4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -176,6 +176,12 @@ void nfs_release_request(struct nfs_page *req)
kref_put(&req->wb_kref, nfs_free_request);
}
+static int nfs_wait_bit_uninterruptible(void *word)
+{
+ io_schedule();
+ return 0;
+}
+
/**
* nfs_wait_on_request - Wait for a request to complete.
* @req: request to wait upon.
@@ -186,14 +192,9 @@ void nfs_release_request(struct nfs_page *req)
int
nfs_wait_on_request(struct nfs_page *req)
{
- int ret = 0;
-
- if (!test_bit(PG_BUSY, &req->wb_flags))
- goto out;
- ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
- nfs_wait_bit_killable, TASK_KILLABLE);
-out:
- return ret;
+ return wait_on_bit(&req->wb_flags, PG_BUSY,
+ nfs_wait_bit_uninterruptible,
+ TASK_UNINTERRUPTIBLE);
}
/**
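
The pagelist.c hunk above replaces the killable wait on PG_BUSY with wait_on_bit() in TASK_UNINTERRUPTIBLE state, so nfs_wait_on_request() can no longer return early on a fatal signal. A rough userspace analogy of that "block until the busy flag clears, no early exit" pattern, using a pthread condition variable; the struct and function names here are invented and this is not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct request {
    pthread_mutex_t lock;
    pthread_cond_t done;
    bool busy;
};

/* Block until the request stops being busy; like TASK_UNINTERRUPTIBLE,
 * there is no early-return path for signals here. */
static void wait_on_request(struct request *req)
{
    pthread_mutex_lock(&req->lock);
    while (req->busy)
        pthread_cond_wait(&req->done, &req->lock);
    pthread_mutex_unlock(&req->lock);
}

static void *complete_request(void *arg)
{
    struct request *req = arg;

    sleep(1);                        /* pretend the I/O takes a while */
    pthread_mutex_lock(&req->lock);
    req->busy = false;
    pthread_cond_broadcast(&req->done);
    pthread_mutex_unlock(&req->lock);
    return NULL;
}

int main(void)
{
    struct request req = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .done = PTHREAD_COND_INITIALIZER,
        .busy = true,
    };
    pthread_t t;

    pthread_create(&t, NULL, complete_request, &req);
    wait_on_request(&req);           /* returns only once busy is cleared */
    pthread_join(t, NULL);
    printf("request completed\n");
    return 0;
}
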
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ce907efc5508..f1afee4eea77 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -243,6 +243,7 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *);
static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *);
static int nfs_xdev_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
+static void nfs_put_super(struct super_block *);
static void nfs_kill_super(struct super_block *);
static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
@@ -266,6 +267,7 @@ static const struct super_operations nfs_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
+ .put_super = nfs_put_super,
.statfs = nfs_statfs,
.clear_inode = nfs_clear_inode,
.umount_begin = nfs_umount_begin,
@@ -335,6 +337,7 @@ static const struct super_operations nfs4_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
+ .put_super = nfs_put_super,
.statfs = nfs_statfs,
.clear_inode = nfs4_clear_inode,
.umount_begin = nfs_umount_begin,
@@ -2258,6 +2261,17 @@ error_splat_super:
}
/*
+ * Ensure that we unregister the bdi before kill_anon_super
+ * releases the device name
+ */
+static void nfs_put_super(struct super_block *s)
+{
+ struct nfs_server *server = NFS_SB(s);
+
+ bdi_unregister(&server->backing_dev_info);
+}
+
+/*
* Destroy an NFS2/3 superblock
*/
static void nfs_kill_super(struct super_block *s)
@@ -2265,7 +2279,6 @@ static void nfs_kill_super(struct super_block *s)
struct nfs_server *server = NFS_SB(s);
kill_anon_super(s);
- bdi_unregister(&server->backing_dev_info);
nfs_fscache_release_super_cookie(s);
nfs_free_server(server);
}
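
The super.c hunks above move bdi_unregister() out of nfs_kill_super() into a new ->put_super(), so the bdi is unregistered before kill_anon_super() releases the device name it was registered under. A tiny standalone sketch of that "unregister before freeing the name it references" ordering; all structs and names below are stand-ins, not the NFS data structures.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct backing_dev { const char *name; int registered; };
struct server      { struct backing_dev bdi; char *devname; };

static void bdi_unregister_stub(struct backing_dev *bdi)
{
    if (bdi->registered)
        printf("unregistering bdi for %s\n", bdi->name);
    bdi->registered = 0;
    bdi->name = NULL;
}

static void kill_super_stub(struct server *srv)
{
    free(srv->devname);      /* after this, a still-registered bdi->name would dangle */
    srv->devname = NULL;
}

int main(void)
{
    struct server srv = { .devname = strdup("nfs:/export") };

    srv.bdi.name = srv.devname;
    srv.bdi.registered = 1;

    bdi_unregister_stub(&srv.bdi);   /* put_super: runs first */
    kill_super_stub(&srv);           /* kill_super: frees the name afterwards */
    return 0;
}
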
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index 70e1fbbaaeab..ad4d2e787b20 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -15,8 +15,10 @@
#include "callback.h"
+#ifdef CONFIG_NFS_V4
static const int nfs_set_port_min = 0;
static const int nfs_set_port_max = 65535;
+#endif
static struct ctl_table_header *nfs_callback_sysctl_table;
static ctl_table nfs_cb_sysctls[] = {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d171696017f4..d63d964a0392 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1233,7 +1233,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-void nfs_commitdata_release(void *data)
+static void nfs_commitdata_release(void *data)
{
struct nfs_write_data *wdata = data;
@@ -1541,6 +1541,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
break;
}
ret = nfs_wait_on_request(req);
+ nfs_release_request(req);
if (ret < 0)
goto out;
}
@@ -1597,8 +1598,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
struct nfs_page *req;
int ret;
- if (PageFsCache(page))
- nfs_fscache_release_page(page, GFP_KERNEL);
+ nfs_fscache_release_page(page, GFP_KERNEL);
req = nfs_find_and_lock_request(page);
ret = PTR_ERR(req);
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c487810a2366..a0c4016413f1 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
{
- struct svc_export *exp;
u32 fsidv[2];
mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
- exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
- /*
- * We shouldn't have accepting an nfsv4 request at all if we
- * don't have a pseudoexport!:
- */
- if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
- exp = ERR_PTR(-ESERVERFAULT);
- return exp;
+ return rqst_exp_find(rqstp, FSID_NUM, fsidv);
}
/*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 8afdba5082e8..43bd776c4882 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -780,6 +780,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
flags, current_cred());
if (IS_ERR(*filp))
host_err = PTR_ERR(*filp);
+ else
+ host_err = ima_file_check(*filp, access);
out_nfserr:
err = nfserrno(host_err);
out:
@@ -2120,7 +2122,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
*/
path.mnt = exp->ex_path.mnt;
path.dentry = dentry;
- err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
nfsd_out:
return err? nfserrno(err) : 0;
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 17584c524486..105b508b47a8 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2829,7 +2829,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
- if (flag || nilfs_segctor_confirm(sci))
+ if (flag || !nilfs_segctor_confirm(sci))
nilfs_segctor_write_out(sci);
WARN_ON(!list_empty(&sci->sc_copied_buffers));
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index c9ee67b442e1..1afb0a10229f 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -121,7 +121,7 @@ static int idr_callback(int id, void *p, void *data)
if (warned)
return 0;
- warned = false;
+ warned = true;
entry = p;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 8271cf05c957..a94e8bd8eb1f 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -552,7 +552,7 @@ retry:
spin_lock(&group->inotify_data.idr_lock);
ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
- group->inotify_data.last_wd,
+ group->inotify_data.last_wd+1,
&tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
@@ -632,7 +632,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
- group->inotify_data.last_wd = 1;
+ group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 3dae4a13f6e4..7e9df11260f4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -599,7 +599,7 @@ bail:
return ret;
}
-/*
+/*
* ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
* particularly interested in the aio/dio case. Like the core uses
* i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
inode->i_sb->s_bdev, iov, offset,
- nr_segs,
+ nr_segs,
ocfs2_direct_IO_get_blocks,
ocfs2_dio_end_io);
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d43d34a1dd31..21c808f752d8 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
}
ocfs2_metadata_cache_io_unlock(ci);
- mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
+ mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
(unsigned long long)block, nr,
((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
flags);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index eda5b8bcddd5..5c9890006708 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
-/* Only sets a new threshold if there are no active regions.
+/* Only sets a new threshold if there are no active regions.
*
* No locking or otherwise interesting code is required for reading
* o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)
mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
"milliseconds\n", reg->hr_dev_name,
- jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
+ jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
o2quo_disk_timeout();
}
@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
"seq %llu last %llu changed %u equal %u\n",
slot->ds_node_num, (long long)slot->ds_last_generation,
le32_to_cpu(hb_block->hb_cksum),
- (unsigned long long)le64_to_cpu(hb_block->hb_seq),
+ (unsigned long long)le64_to_cpu(hb_block->hb_seq),
(unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
slot->ds_equal_samples);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 334f231a422c..d8d0c65ac03c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
}
if (was_valid && !valid) {
- printk(KERN_INFO "o2net: no longer connected to "
+ printk(KERN_NOTICE "o2net: no longer connected to "
SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
o2net_complete_nodes_nsw(nn);
}
@@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
if (!was_valid && valid) {
o2quo_conn_up(o2net_num_from_nn(nn));
cancel_delayed_work(&nn->nn_connect_expired);
- printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
+ printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
o2nm_this_node() > sc->sc_node->nd_num ?
"connected to" : "accepted connection from",
SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
cond_resched();
continue;
}
- mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
+ mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
" failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
o2net_ensure_shutdown(nn, sc, 0);
break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)
do_gettimeofday(&now);
- printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
+ printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
"seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
o2net_idle_timeout() / 1000,
o2net_idle_timeout() % 1000);
mlog(ML_NOTICE, "here are some times that might help debug the "
"situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
"%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
- sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
+ sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
now.tv_sec, (long) now.tv_usec,
sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
sc->sc_tv_advance_start.tv_sec,
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 8d58cfe410b1..96fa7ebc530c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -32,10 +32,10 @@
* on their number */
#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
-/*
+/*
* This version number represents quite a lot, unfortunately. It not
* only represents the raw network message protocol on the wire but also
- * locking semantics of the file system using the protocol. It should
+ * locking semantics of the file system using the protocol. It should
* be somewhere else, I'm sure, but right now it isn't.
*
* With version 11, we separate out the filesystem locking portion. The
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index b5786a787fab..3cfa114aa391 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
} while (0)
-#define DLM_LKSB_UNUSED1 0x01
+#define DLM_LKSB_UNUSED1 0x01
#define DLM_LKSB_PUT_LVB 0x02
#define DLM_LKSB_GET_LVB 0x04
#define DLM_LKSB_UNUSED2 0x08
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 01cf8cc3d286..dccc439fa087 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
dlm_lock_put(lock);
/* free up the reserved bast that we are cancelling.
* guaranteed that this will not be the last reserved
- * ast because *both* an ast and a bast were reserved
+ * ast because *both* an ast and a bast were reserved
* to get to this point. the res->spinlock will not be
* taken here */
dlm_lockres_release_ast(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index ca96bce50e18..f283bce776b4 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
/* instead of logging the same network error over
* and over, sleep here and wait for the heartbeat
* to notice the node is dead. times out after 5s. */
- dlm_wait_for_node_death(dlm, res->owner,
+ dlm_wait_for_node_death(dlm, res->owner,
DLM_NODE_DEATH_WAIT_MAX);
ret = DLM_RECOVERING;
mlog(0, "node %u died so returning DLM_RECOVERING "
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 42b0bad7a612..0cd24cf54396 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
assert_spin_locked(&res->spinlock);
stringify_lockname(res->lockname.name, res->lockname.len,
- buf, sizeof(buf) - 1);
+ buf, sizeof(buf));
printk("lockres: %s, owner=%u, state=%u\n",
buf, res->owner, res->state);
printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 0334000676d3..988c9055fd4e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
}
/* Once the dlm ctxt is marked as leaving then we don't want
- * to be put in someone's domain map.
+ * to be put in someone's domain map.
* Also, explicitly disallow joining at certain troublesome
* times (ie. during recovery). */
if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 437698e9465f..733337772671 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
}
dlm_revert_pending_lock(res, lock);
dlm_lock_put(lock);
- } else if (dlm_is_recovery_lock(res->lockname.name,
+ } else if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
/* special case for the $RECOVERY lock.
* there will never be an AST delivered to put
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 03ccf9a7b1f4..a659606dcb95 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
struct dlm_master_list_entry *mle;
assert_spin_locked(&dlm->spinlock);
-
+
list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
if (node_up)
dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ lookup:
__dlm_insert_mle(dlm, mle);
/* still holding the dlm spinlock, check the recovery map
- * to see if there are any nodes that still need to be
+ * to see if there are any nodes that still need to be
* considered. these will not appear in the mle nodemap
* but they might own this lockres. wait on them. */
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ redo_request:
msleep(500);
}
continue;
- }
+ }
dlm_kick_recovery_thread(dlm);
msleep(1000);
@@ -939,8 +939,8 @@ wait:
res->lockname.name, blocked);
if (++tries > 20) {
mlog(ML_ERROR, "%s:%.*s: spinning on "
- "dlm_wait_for_lock_mastery, blocked=%d\n",
- dlm->name, res->lockname.len,
+ "dlm_wait_for_lock_mastery, blocked=%d\n",
+ dlm->name, res->lockname.len,
res->lockname.name, blocked);
dlm_print_one_lock_resource(res);
dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ recheck:
ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
b = (mle->type == DLM_MLE_BLOCK);
if ((*blocked && !b) || (!*blocked && b)) {
- mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
+ mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
dlm->name, res->lockname.len, res->lockname.name,
*blocked, b);
*blocked = b;
@@ -1602,7 +1602,7 @@ send_response:
}
mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
dlm->node_num, res->lockname.len, res->lockname.name);
- ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
+ ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
DLM_ASSERT_MASTER_MLE_CLEANUP);
if (ret < 0) {
mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ again:
if (r & DLM_ASSERT_RESPONSE_REASSERT) {
mlog(0, "%.*s: node %u create mles on other "
- "nodes and requests a re-assert\n",
+ "nodes and requests a re-assert\n",
namelen, lockname, to);
reassert = 1;
}
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
goto done;
- }
+ }
}
}
spin_unlock(&dlm->master_lock);
@@ -1883,7 +1883,7 @@ ok:
int extra_ref = 0;
int nn = -1;
int rr, err = 0;
-
+
spin_lock(&mle->spinlock);
if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
extra_ref = 1;
@@ -1891,7 +1891,7 @@ ok:
/* MASTER mle: if any bits set in the response map
* then the calling node needs to re-assert to clear
* up nodes that this node contacted */
- while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
+ while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
nn+1)) < O2NM_MAX_NODES) {
if (nn != dlm->node_num && nn != assert->node_idx)
master_request = 1;
@@ -2002,7 +2002,7 @@ kill:
__dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
- *ret_data = (void *)res;
+ *ret_data = (void *)res;
dlm_put(dlm);
return -EINVAL;
}
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
item->u.am.request_from = request_from;
item->u.am.flags = flags;
- if (ignore_higher)
- mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
+ if (ignore_higher)
+ mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
res->lockname.name);
-
+
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ put:
* think that $RECOVERY is currently mastered by a dead node. If so,
* we wait a short time to allow that node to get notified by its own
* heartbeat stack, then check again. All $RECOVERY lock resources
- * mastered by dead nodes are purged when the hearbeat callback is
+ * mastered by dead nodes are purged when the hearbeat callback is
* fired, so we can know for sure that it is safe to continue once
* the node returns a live node or no node. */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
ret = -EAGAIN;
}
spin_unlock(&dlm->spinlock);
- mlog(0, "%s: reco lock master is %u\n", dlm->name,
+ mlog(0, "%s: reco lock master is %u\n", dlm->name,
master);
break;
}
@@ -2602,7 +2602,7 @@ fail:
mlog(0, "%s:%.*s: timed out during migration\n",
dlm->name, res->lockname.len, res->lockname.name);
- /* avoid hang during shutdown when migrating lockres
+ /* avoid hang during shutdown when migrating lockres
* to a node which also goes down */
if (dlm_is_node_dead(dlm, target)) {
mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
spin_unlock(&res->spinlock);
- /* target has died, so make the caller break out of the
+ /* target has died, so make the caller break out of the
* wait_event, but caller must recheck the domain_map */
spin_lock(&dlm->spinlock);
if (!test_bit(mig_target, dlm->domain_map))
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 2f9e4e19a4f2..344bcf90cbf4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
if (lock->ml.node == dead_node) {
mlog(0, "AHA! there was "
"a $RECOVERY lock for dead "
- "node %u (%s)!\n",
+ "node %u (%s)!\n",
dead_node, dlm->name);
list_del_init(&lock->list);
dlm_lock_put(lock);
@@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
mres->master = master;
}
+static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
+ struct dlm_migratable_lockres *mres,
+ int queue)
+{
+ if (!lock->lksb)
+ return;
+
+ /* Ignore lvb in all locks in the blocked list */
+ if (queue == DLM_BLOCKED_LIST)
+ return;
+
+ /* Only consider lvbs in locks with granted EX or PR lock levels */
+ if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
+ return;
+
+ if (dlm_lvb_is_empty(mres->lvb)) {
+ memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+ return;
+ }
+
+ /* Ensure the lvb copied for migration matches in other valid locks */
+ if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
+ return;
+
+ mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
+ "node=%u\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->lockres->lockname.len, lock->lockres->lockname.name,
+ lock->ml.node);
+ dlm_print_one_lock_resource(lock->lockres);
+ BUG();
+}
/* returns 1 if this lock fills the network structure,
* 0 otherwise */
@@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
ml->list = queue;
if (lock->lksb) {
ml->flags = lock->lksb->flags;
- /* send our current lvb */
- if (ml->type == LKM_EXMODE ||
- ml->type == LKM_PRMODE) {
- /* if it is already set, this had better be a PR
- * and it has to match */
- if (!dlm_lvb_is_empty(mres->lvb) &&
- (ml->type == LKM_EXMODE ||
- memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
- mlog(ML_ERROR, "mismatched lvbs!\n");
- dlm_print_one_lock_resource(lock->lockres);
- BUG();
- }
- memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
- }
+ dlm_prepare_lvb_for_migration(lock, mres, queue);
}
ml->node = lock->ml.node;
mres->num_locks++;
@@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
struct dlm_lock *lock = NULL;
u8 from = O2NM_MAX_NODES;
unsigned int added = 0;
+ __be64 c;
mlog(0, "running %d locks for this lockres\n", mres->num_locks);
for (i=0; i<mres->num_locks; i++) {
@@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
/* lock is always created locally first, and
* destroyed locally last. it must be on the list */
if (!lock) {
- __be64 c = ml->cookie;
- mlog(ML_ERROR, "could not find local lock "
- "with cookie %u:%llu!\n",
+ c = ml->cookie;
+ mlog(ML_ERROR, "Could not find local lock "
+ "with cookie %u:%llu, node %u, "
+ "list %u, flags 0x%x, type %d, "
+ "conv %d, highest blocked %d\n",
dlm_get_lock_cookie_node(be64_to_cpu(c)),
- dlm_get_lock_cookie_seq(be64_to_cpu(c)));
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ ml->node, ml->list, ml->flags, ml->type,
+ ml->convert_type, ml->highest_blocked);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+
+ if (lock->ml.node != ml->node) {
+ c = lock->ml.cookie;
+ mlog(ML_ERROR, "Mismatched node# in lock "
+ "cookie %u:%llu, name %.*s, node %u\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ res->lockname.len, res->lockname.name,
+ lock->ml.node);
+ c = ml->cookie;
+ mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
+ "node %u, list %u, flags 0x%x, type %d, "
+ "conv %d, highest blocked %d\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ ml->node, ml->list, ml->flags, ml->type,
+ ml->convert_type, ml->highest_blocked);
__dlm_print_one_lock_resource(res);
BUG();
}
- BUG_ON(lock->ml.node != ml->node);
if (tmpq != queue) {
- mlog(0, "lock was on %u instead of %u for %.*s\n",
- j, ml->list, res->lockname.len, res->lockname.name);
+ c = ml->cookie;
+ mlog(0, "Lock cookie %u:%llu was on list %u "
+ "instead of list %u for %.*s\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ j, ml->list, res->lockname.len,
+ res->lockname.name);
+ __dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
continue;
}
@@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
* the lvb. */
memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
} else {
- /* otherwise, the node is sending its
+ /* otherwise, the node is sending its
* most recent valid lvb info */
BUG_ON(ml->type != LKM_EXMODE &&
ml->type != LKM_PRMODE);
@@ -1886,7 +1936,7 @@ skip_lvb:
spin_lock(&res->spinlock);
list_for_each_entry(lock, queue, list) {
if (lock->ml.cookie == ml->cookie) {
- __be64 c = lock->ml.cookie;
+ c = lock->ml.cookie;
mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
"exists on this lockres!\n", dlm->name,
res->lockname.len, res->lockname.name,
@@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
assert_spin_locked(&res->spinlock);
if (res->owner == dlm->node_num)
- /* if this node owned the lockres, and if the dead node
+ /* if this node owned the lockres, and if the dead node
* had an EX when he died, blank out the lvb */
search_node = dead_node;
else {
@@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
/* this node is the lockres master:
* 1) remove any stale locks for the dead node
- * 2) if the dead node had an EX when he died, blank out the lvb
+ * 2) if the dead node had an EX when he died, blank out the lvb
*/
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
@@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
"dropping ref from lockres\n", dlm->name,
res->lockname.len, res->lockname.name, freed, dead_node);
- BUG_ON(!test_bit(dead_node, res->refmap));
+ if(!test_bit(dead_node, res->refmap)) {
+ mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
+ "but ref was not set\n", dlm->name,
+ res->lockname.len, res->lockname.name, freed, dead_node);
+ __dlm_print_one_lock_resource(res);
+ }
dlm_lockres_clear_refmap_bit(dead_node, res);
} else if (test_bit(dead_node, res->refmap)) {
mlog(0, "%s:%.*s: dead node %u had a ref, but had "
@@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
}
spin_unlock(&res->spinlock);
continue;
- }
+ }
spin_lock(&res->spinlock);
/* zero the lvb if necessary */
dlm_revalidate_lvb(dlm, res, dead_node);
@@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
* this function on each node racing to become the recovery
* master will not stop attempting this until either:
* a) this node gets the EX (and becomes the recovery master),
- * or b) dlm->reco.new_master gets set to some nodenum
+ * or b) dlm->reco.new_master gets set to some nodenum
* != O2NM_INVALID_NODE_NUM (another node will do the reco).
* so each time a recovery master is needed, the entire cluster
* will sync at this point. if the new master dies, that will
@@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
-again:
+again:
memset(&lksb, 0, sizeof(lksb));
ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
@@ -2437,8 +2492,8 @@ again:
if (ret == DLM_NORMAL) {
mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
dlm->name, dlm->node_num);
-
- /* got the EX lock. check to see if another node
+
+ /* got the EX lock. check to see if another node
* just became the reco master */
if (dlm_reco_master_ready(dlm)) {
mlog(0, "%s: got reco EX lock, but %u will "
@@ -2451,12 +2506,12 @@ again:
/* see if recovery was already finished elsewhere */
spin_lock(&dlm->spinlock);
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
- status = -EINVAL;
+ status = -EINVAL;
mlog(0, "%s: got reco EX lock, but "
"node got recovered already\n", dlm->name);
if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
mlog(ML_ERROR, "%s: new master is %u "
- "but no dead node!\n",
+ "but no dead node!\n",
dlm->name, dlm->reco.new_master);
BUG();
}
@@ -2468,7 +2523,7 @@ again:
* set the master and send the messages to begin recovery */
if (!status) {
mlog(0, "%s: dead=%u, this=%u, sending "
- "begin_reco now\n", dlm->name,
+ "begin_reco now\n", dlm->name,
dlm->reco.dead_node, dlm->node_num);
status = dlm_send_begin_reco_message(dlm,
dlm->reco.dead_node);
@@ -2501,7 +2556,7 @@ again:
mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
dlm->name, dlm->node_num);
/* another node is master. wait on
- * reco.new_master != O2NM_INVALID_NODE_NUM
+ * reco.new_master != O2NM_INVALID_NODE_NUM
* for at most one second */
wait_event_timeout(dlm->dlm_reco_thread_wq,
dlm_reco_master_ready(dlm),
@@ -2589,7 +2644,13 @@ retry:
"begin reco msg (%d)\n", dlm->name, nodenum, ret);
ret = 0;
}
- if (ret == -EAGAIN) {
+
+ /*
+ * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
+ * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
+ * We are handling both for compatibility reasons.
+ */
+ if (ret == -EAGAIN || ret == EAGAIN) {
mlog(0, "%s: trying to start recovery of node "
"%u, but node %u is waiting for last recovery "
"to complete, backoff for a bit\n", dlm->name,
@@ -2599,7 +2660,7 @@ retry:
}
if (ret < 0) {
struct dlm_lock_resource *res;
- /* this is now a serious problem, possibly ENOMEM
+ /* this is now a serious problem, possibly ENOMEM
* in the network stack. must retry */
mlog_errno(ret);
mlog(ML_ERROR, "begin reco of dlm %s to node %u "
@@ -2612,7 +2673,7 @@ retry:
} else {
mlog(ML_ERROR, "recovery lock not found\n");
}
- /* sleep for a bit in hopes that we can avoid
+ /* sleep for a bit in hopes that we can avoid
* another ENOMEM */
msleep(100);
goto retry;
@@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
}
if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
- "node %u changing it to %u\n", dlm->name,
+ "node %u changing it to %u\n", dlm->name,
dlm->reco.dead_node, br->node_idx, br->dead_node);
}
dlm_set_reco_master(dlm, br->node_idx);
@@ -2730,8 +2791,8 @@ stage2:
if (ret < 0) {
mlog_errno(ret);
if (dlm_is_host_down(ret)) {
- /* this has no effect on this recovery
- * session, so set the status to zero to
+ /* this has no effect on this recovery
+ * session, so set the status to zero to
* finish out the last recovery */
mlog(ML_ERROR, "node %u went down after this "
"node finished recovery.\n", nodenum);
@@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
mlog(0, "%s: node %u finalizing recovery stage%d of "
"node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
-
+
spin_lock(&dlm->spinlock);
if (dlm->reco.new_master != fr->node_idx) {
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 00f53b2aea76..49e29ecd0201 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
DLM_UNLOCK_REGRANT_LOCK|
DLM_UNLOCK_CLEAR_CONVERT_TYPE);
- } else if (status == DLM_RECOVERING ||
- status == DLM_MIGRATING ||
+ } else if (status == DLM_RECOVERING ||
+ status == DLM_MIGRATING ||
status == DLM_FORWARD) {
/* must clear the actions because this unlock
* is about to be retried. cannot free or do
@@ -661,14 +661,14 @@ retry:
if (call_ast) {
mlog(0, "calling unlockast(%p, %d)\n", data, status);
if (is_master) {
- /* it is possible that there is one last bast
+ /* it is possible that there is one last bast
* pending. make sure it is flushed, then
* call the unlockast.
* not an issue if this is a mastered remotely,
* since this lock has been removed from the
* lockres queues and cannot be found. */
dlm_kick_thread(dlm, NULL);
- wait_event(dlm->ast_wq,
+ wait_event(dlm->ast_wq,
dlm_lock_basts_flushed(dlm, lock));
}
(*unlockast)(data, status);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c5e4a49e3a12..e044019cb3b1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
lockres->l_level = lockres->l_requested;
+
+ /*
+ * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
+ * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
+ * downconverting the lock before the upconvert has fully completed.
+ */
+ lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
mlog_exit_void();
@@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
assert_spin_locked(&lockres->l_lock);
- lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-
if (level > lockres->l_blocking) {
/* only schedule a downconvert if we haven't already scheduled
* one that goes low enough to satisfy the level we're
@@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
lockres->l_blocking = level;
}
+ if (needs_downconvert)
+ lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+
mlog_exit(needs_downconvert);
return needs_downconvert;
}
@@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
if (convert)
lockres->l_action = OCFS2_AST_INVALID;
else
@@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
again:
wait = 0;
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
if (catch_signals && signal_pending(current)) {
ret = -ERESTARTSYS;
- goto out;
+ goto unlock;
}
- spin_lock_irqsave(&lockres->l_lock, flags);
-
mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
"Cluster lock called on freeing lockres %s! flags "
"0x%lx\n", lockres->l_name, lockres->l_flags);
@@ -1346,6 +1356,25 @@ again:
goto unlock;
}
+ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
+ /*
+ * We've upconverted. If the lock now has a level we can
+ * work with, we take it. If, however, the lock is not at the
+ * required level, we go thru the full cycle. One way this could
+ * happen is if a process requesting an upconvert to PR is
+ * closely followed by another requesting upconvert to an EX.
+ * If the process requesting EX lands here, we want it to
+ * continue attempting to upconvert and let the process
+ * requesting PR take the lock.
+ * If multiple processes request upconvert to PR, the first one
+ * here will take the lock. The others will have to go thru the
+ * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
+ * downconvert request.
+ */
+ if (level <= lockres->l_level)
+ goto update_holders;
+ }
+
if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
!ocfs2_may_continue_on_blocked_lock(lockres, level)) {
/* is the lock is currently blocked on behalf of
@@ -1416,11 +1445,14 @@ again:
goto again;
}
+update_holders:
/* Ok, if we get here then we're good to go. */
ocfs2_inc_holders(lockres, level);
ret = 0;
unlock:
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
/*
@@ -3155,7 +3187,7 @@ out:
/* Mark the lockres as being dropped. It will no longer be
* queued if blocking, but we still may have to wait on it
* being dequeued from the downconvert thread before we can consider
- * it safe to drop.
+ * it safe to drop.
*
* You can *not* attempt to call cluster_lock on this lockres anymore. */
void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
unsigned long flags;
int blocking;
int new_level;
+ int level;
int ret = 0;
int set_lvb = 0;
unsigned int gen;
@@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
spin_lock_irqsave(&lockres->l_lock, flags);
- BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
-
recheck:
+ /*
+ * Is it still blocking? If not, we have no more work to do.
+ */
+ if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
+ BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = 0;
+ goto leave;
+ }
+
if (lockres->l_flags & OCFS2_LOCK_BUSY) {
/* XXX
* This is a *big* race. The OCFS2_LOCK_PENDING flag
@@ -3401,6 +3442,31 @@ recheck:
goto leave;
}
+ /*
+ * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
+ * set when the ast is received for an upconvert just before the
+ * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
+ * on the heels of the ast, we want to delay the downconvert just
+ * enough to allow the up requestor to do its task. Because this
+ * lock is in the blocked queue, the lock will be downconverted
+ * as soon as the requestor is done with the lock.
+ */
+ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
+ goto leave_requeue;
+
+ /*
+ * How can we block and yet be at NL? We were trying to upconvert
+ * from NL and got canceled. The code comes back here, and now
+ * we notice and clear BLOCKING.
+ */
+ if (lockres->l_level == DLM_LOCK_NL) {
+ BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+ lockres->l_blocking = DLM_LOCK_NL;
+ lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto leave;
+ }
+
/* if we're blocking an exclusive and we have *any* holders,
* then requeue. */
if ((lockres->l_blocking == DLM_LOCK_EX)
@@ -3438,6 +3504,7 @@ recheck:
* may sleep, so we save off a copy of what we're blocking as
* it may change while we're not holding the spin lock. */
blocking = lockres->l_blocking;
+ level = lockres->l_level;
spin_unlock_irqrestore(&lockres->l_lock, flags);
ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
@@ -3446,7 +3513,7 @@ recheck:
goto leave;
spin_lock_irqsave(&lockres->l_lock, flags);
- if (blocking != lockres->l_blocking) {
+ if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
/* If this changed underneath us, then we can't drop
* it just yet. */
goto recheck;
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 15713cbb865c..19ad145d2af3 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
(unsigned long long)blkno, generation);
}
-
+
*max_len = len;
bail:
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index d35a27f4523e..5328529e7fd2 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
emi->ei_clusters += ins->ei_clusters;
return 1;
} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
- (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys &&
+ (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
ins->ei_flags == emi->ei_flags) {
emi->ei_phys = ins->ei_phys;
emi->ei_cpos = ins->ei_cpos;
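
The one-character extent_map.c fix above compares the inserted extent's logical end against emi->ei_cpos rather than emi->ei_phys, so a front-merge now requires both the physical and the logical ranges to be contiguous. A small sketch of that check with invented types:

#include <stdbool.h>
#include <stdio.h>

struct extent { unsigned cpos, phys, clusters, flags; };

static bool can_merge_front(const struct extent *ins, const struct extent *emi)
{
    return ins->phys + ins->clusters == emi->phys &&
           ins->cpos + ins->clusters == emi->cpos &&   /* previously compared to ei_phys */
           ins->flags == emi->flags;
}

int main(void)
{
    struct extent emi = { .cpos = 10, .phys = 110, .clusters = 4 };
    struct extent ins = { .cpos = 6,  .phys = 106, .clusters = 4 };

    printf("mergeable: %s\n", can_merge_front(&ins, &emi) ? "yes" : "no");
    return 0;
}
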
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 06ccf6a86d35..558ce0312421 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
int ret;
offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
- /* ugh. in prepare/commit_write, if from==to==start of block, we
+ /* ugh. in prepare/commit_write, if from==to==start of block, we
** skip the prepare. make sure we never send an offset for the start
** of a block
*/
@@ -1779,7 +1779,7 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
struct inode *inode = dentry->d_inode;
loff_t saved_pos, end;
- /*
+ /*
* We start with a read level meta lock and only jump to an ex
* if we need to make modifications here.
*/
@@ -2013,8 +2013,8 @@ out_dio:
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
- if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode) ||
- (file->f_flags & O_DIRECT && has_refcount)) {
+ if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+ ((file->f_flags & O_DIRECT) && has_refcount)) {
ret = filemap_fdatawrite_range(file->f_mapping, pos,
pos + count - 1);
if (ret < 0)
@@ -2033,7 +2033,7 @@ out_dio:
pos + count - 1);
}
- /*
+ /*
* deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
* function pointer which is called when o_direct io completes so that
* it can unlock our rw lock. (it's the clustered equivalent of
@@ -2198,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
goto bail;
}
- /*
+ /*
* buffered reads protect themselves in ->readpage(). O_DIRECT reads
* need locks to protect pending reads from racing with truncate.
*/
@@ -2220,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
* We're fine letting folks race truncates and extending
* writes with read across the cluster, just like they can
* locally. Hence no rw_lock during read.
- *
+ *
* Take and drop the meta data lock to update inode fields
* like i_size. This allows the checks down below
- * generic_file_aio_read() a chance of actually working.
+ * generic_file_aio_read() a chance of actually working.
*/
ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
if (ret < 0) {
@@ -2248,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
bail:
if (have_alloc_sem)
up_read(&inode->i_alloc_sem);
- if (rw_level != -1)
+ if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
mlog_exit(ret);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0297fb8982b8..88459bdd1ff3 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
status = ocfs2_try_open_lock(inode, 0);
if (status) {
- make_bad_inode(inode);
+ make_bad_inode(inode);
return status;
}
}
@@ -684,7 +684,7 @@ bail:
return status;
}
-/*
+/*
* Serialize with orphan dir recovery. If the process doing
* recovery on this orphan dir does an iget() with the dir
* i_mutex held, we'll deadlock here. Instead we detect this
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 31fbb0619510..7d9d9c132cef 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/compat.h>
#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
@@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_COMPAT
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
+ bool preserve;
+ struct reflink_arguments args;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
switch (cmd) {
case OCFS2_IOC32_GETFLAGS:
cmd = OCFS2_IOC_GETFLAGS;
@@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case OCFS2_IOC_GROUP_EXTEND:
case OCFS2_IOC_GROUP_ADD:
case OCFS2_IOC_GROUP_ADD64:
- case OCFS2_IOC_REFLINK:
break;
+ case OCFS2_IOC_REFLINK:
+ if (copy_from_user(&args, (struct reflink_arguments *)arg,
+ sizeof(args)))
+ return -EFAULT;
+ preserve = (args.preserve != 0);
+
+ return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
+ compat_ptr(args.new_path), preserve);
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bf34c491ae96..9336c60e3a36 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
status = -ENOENT;
mlog_errno(status);
return status;
- }
+ }
mutex_lock(&orphan_dir_inode->i_mutex);
status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 9362eea7424b..740f448041e2 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -136,6 +136,10 @@ enum ocfs2_unlock_action {
#define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a
call to dlm_lock. Only
exists with BUSY set. */
+#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
+ * from downconverting
+ * before the upconvert
+ * has completed */
struct ocfs2_lock_res_ops;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 1a1a679e51b5..7638a38c32bc 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
}
-static inline int ocfs2_max_inline_data(int blocksize)
+static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
+ struct ocfs2_dinode *di)
{
- return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+ if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
+ return blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
+ di->i_xattr_inline_size;
+ else
+ return blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_data.id_data);
}
static inline int ocfs2_extent_recs_per_inode(int blocksize)
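
The ocfs2_fs.h hunk above replaces ocfs2_max_inline_data() with a variant that subtracts the inline-xattr area when the dinode carries OCFS2_INLINE_XATTR_FL. A standalone sketch of that capacity calculation; the struct below is a stand-in for ocfs2_dinode and the sizes are made up.

#include <stddef.h>
#include <stdio.h>

struct fake_dinode {
    char header[200];     /* placeholder for the fixed dinode fields */
    char id_data[1];      /* inline file data starts here */
};

static int max_inline_data_with_xattr(int blocksize, int xattr_inline_size)
{
    int base = blocksize - (int)offsetof(struct fake_dinode, id_data);

    /* Inline xattrs occupy the tail of the block, so subtract them when present. */
    return xattr_inline_size ? base - xattr_inline_size : base;
}

int main(void)
{
    printf("no inline xattr:   %d bytes\n", max_inline_data_with_xattr(4096, 0));
    printf("with inline xattr: %d bytes\n", max_inline_data_with_xattr(4096, 256));
    return 0;
}
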
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 74db2be75dd6..8ae65c9c020c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
while (offset < end) {
page_index = offset >> PAGE_CACHE_SHIFT;
- map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+ map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
if (map_end > end)
map_end = end;
@@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
page = grab_cache_page(mapping, page_index);
- /* This page can't be dirtied before we CoW it out. */
- BUG_ON(PageDirty(page));
+ /*
+ * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+ * can't be dirtied before we CoW it out.
+ */
+ if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+ BUG_ON(PageDirty(page));
if (!PageUptodate(page)) {
ret = block_read_full_page(page, ocfs2_get_block);
@@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
while (offset < end) {
page_index = offset >> PAGE_CACHE_SHIFT;
- map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+ map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
if (map_end > end)
map_end = end;
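
Both refcounttree.c hunks cast page_index to loff_t before the shift; on 32-bit systems the old expression shifted a 32-bit pgoff_t and truncated offsets beyond 4 GiB. A minimal demonstration of why the widening must happen before the shift (the constants here are arbitrary):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12    /* 4 KiB pages, as on most configurations */

int main(void)
{
    uint32_t page_index = 0x00200000;    /* simulates a 32-bit pgoff_t */

    /* Shift first, widen later: the high bits are already gone. */
    uint64_t truncated = (uint64_t)((page_index + 1) << PAGE_SHIFT);

    /* Widen first, then shift: the full 64-bit offset survives. */
    uint64_t correct = ((uint64_t)page_index + 1) << PAGE_SHIFT;

    printf("truncated=0x%" PRIx64 " correct=0x%" PRIx64 "\n",
           truncated, correct);
    return 0;
}
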
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index e49c41050264..3038c92af493 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
u32 dlm_key;
struct dlm_ctxt *dlm;
struct o2dlm_private *priv;
- struct dlm_protocol_version dlm_version;
+ struct dlm_protocol_version fs_version;
BUG_ON(conn == NULL);
BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
/* used by the dlm code to make message headers unique, each
* node in this domain must agree on this. */
dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
- dlm_version.pv_major = conn->cc_version.pv_major;
- dlm_version.pv_minor = conn->cc_version.pv_minor;
+ fs_version.pv_major = conn->cc_version.pv_major;
+ fs_version.pv_minor = conn->cc_version.pv_minor;
- dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version);
+ dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
if (IS_ERR(dlm)) {
rc = PTR_ERR(dlm);
mlog_errno(rc);
goto out_free;
}
- conn->cc_version.pv_major = dlm_version.pv_major;
- conn->cc_version.pv_minor = dlm_version.pv_minor;
+ conn->cc_version.pv_major = fs_version.pv_major;
+ conn->cc_version.pv_minor = fs_version.pv_minor;
conn->cc_lockspace = dlm;
dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 26069917a9f5..755cd49a5ef3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
"file system, but write access is "
"unavailable.\n");
else
- mlog_errno(status);
+ mlog_errno(status);
goto read_super_error;
}
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 49b133ccbf11..32499d213fc4 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
}
memcpy(link, target, len);
- nd_set_link(nd, link);
bail:
+ nd_set_link(nd, status ? ERR_PTR(status) : link);
brelse(bh);
mlog_exit(status);
- return status ? ERR_PTR(status) : link;
+ return NULL;
}
static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
- char *link = cookie;
-
- kfree(link);
+ char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
}
const struct inode_operations ocfs2_symlink_inode_operations = {
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index c61369342a27..a0a120e82b97 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
}
/* Warning: even if it returns true, this does *not* guarantee that
- * the block is stored in our inode metadata cache.
- *
+ * the block is stored in our inode metadata cache.
+ *
* This can be called under lock_buffer()
*/
int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 18d5cc62d8ed..58324c299165 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1419,7 +1419,6 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
goto out;
error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
- nd->last_type = LAST_BIND;
out:
return ERR_PTR(error);
}
@@ -2370,16 +2369,30 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct pid_namespace *ns = dentry->d_sb->s_fs_info;
pid_t tgid = task_tgid_nr_ns(current, ns);
- char tmp[PROC_NUMBUF];
- if (!tgid)
- return ERR_PTR(-ENOENT);
- sprintf(tmp, "%d", task_tgid_nr_ns(current, ns));
- return ERR_PTR(vfs_follow_link(nd,tmp));
+ char *name = ERR_PTR(-ENOENT);
+ if (tgid) {
+ name = __getname();
+ if (!name)
+ name = ERR_PTR(-ENOMEM);
+ else
+ sprintf(name, "%d", tgid);
+ }
+ nd_set_link(nd, name);
+ return NULL;
+}
+
+static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ __putname(s);
}
static const struct inode_operations proc_self_inode_operations = {
.readlink = proc_self_readlink,
.follow_link = proc_self_follow_link,
+ .put_link = proc_self_put_link,
};
/*
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 2efc57173fd7..1739a4aba25f 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -123,30 +123,6 @@ add_error:
/*****************************************************************************/
/*
- * check that file shrinkage doesn't leave any VMAs dangling in midair
- */
-static int ramfs_nommu_check_mappings(struct inode *inode,
- size_t newsize, size_t size)
-{
- struct vm_area_struct *vma;
- struct prio_tree_iter iter;
-
- /* search for VMAs that fall within the dead zone */
- vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
- newsize >> PAGE_SHIFT,
- (size + PAGE_SIZE - 1) >> PAGE_SHIFT
- ) {
- /* found one - only interested if it's shared out of the page
- * cache */
- if (vma->vm_flags & VM_SHARED)
- return -ETXTBSY; /* not quite true, but near enough */
- }
-
- return 0;
-}
-
-/*****************************************************************************/
-/*
*
*/
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
@@ -164,7 +140,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
/* check that a decrease in size doesn't cut off any shared mappings */
if (newsize < size) {
- ret = ramfs_nommu_check_mappings(inode, newsize, size);
+ ret = nommu_shrink_inode_mappings(inode, size, newsize);
if (ret < 0)
return ret;
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 9087b10209e6..2df0f5c7c60b 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1497,9 +1497,11 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
args.objectid = key->on_disk_key.k_objectid;
args.dirid = key->on_disk_key.k_dir_id;
+ reiserfs_write_unlock(s);
inode = iget5_locked(s, key->on_disk_key.k_objectid,
reiserfs_find_actor, reiserfs_init_locked_inode,
(void *)(&args));
+ reiserfs_write_lock(s);
if (!inode)
return ERR_PTR(-ENOMEM);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 83ac4d3b3cb0..ba98546fabbd 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2913,7 +2913,9 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
journal->j_mount_id = 10;
journal->j_state = 0;
atomic_set(&(journal->j_jlock), 0);
+ reiserfs_write_unlock(sb);
journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
+ reiserfs_write_lock(sb);
journal->j_cnode_free_orig = journal->j_cnode_free_list;
journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
journal->j_cnode_used = 0;
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index c117fa80d1e9..42d213546894 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -544,6 +544,7 @@ error:
error_rsb_inval:
ret = -EINVAL;
error_rsb:
+ kfree(rsb);
return ret;
}
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 220b758523ae..6a06a1d1ea7b 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -81,24 +81,23 @@ int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr)
if (!sd_attrs)
return -ENOMEM;
sd->s_iattr = sd_attrs;
- } else {
- /* attributes were changed at least once in past */
- iattrs = &sd_attrs->ia_iattr;
-
- if (ia_valid & ATTR_UID)
- iattrs->ia_uid = iattr->ia_uid;
- if (ia_valid & ATTR_GID)
- iattrs->ia_gid = iattr->ia_gid;
- if (ia_valid & ATTR_ATIME)
- iattrs->ia_atime = iattr->ia_atime;
- if (ia_valid & ATTR_MTIME)
- iattrs->ia_mtime = iattr->ia_mtime;
- if (ia_valid & ATTR_CTIME)
- iattrs->ia_ctime = iattr->ia_ctime;
- if (ia_valid & ATTR_MODE) {
- umode_t mode = iattr->ia_mode;
- iattrs->ia_mode = sd->s_mode = mode;
- }
+ }
+ /* attributes were changed at least once in past */
+ iattrs = &sd_attrs->ia_iattr;
+
+ if (ia_valid & ATTR_UID)
+ iattrs->ia_uid = iattr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ iattrs->ia_gid = iattr->ia_gid;
+ if (ia_valid & ATTR_ATIME)
+ iattrs->ia_atime = iattr->ia_atime;
+ if (ia_valid & ATTR_MTIME)
+ iattrs->ia_mtime = iattr->ia_mtime;
+ if (ia_valid & ATTR_CTIME)
+ iattrs->ia_ctime = iattr->ia_ctime;
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = iattr->ia_mode;
+ iattrs->ia_mode = sd->s_mode = mode;
}
return 0;
}
diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile
index 56641fe52a23..5c5a366aa332 100644
--- a/fs/xfs/Makefile
+++ b/fs/xfs/Makefile
@@ -16,7 +16,7 @@
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
-EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6 -funsigned-char
+EXTRA_CFLAGS += -I$(src) -I$(src)/linux-2.6
XFS_LINUX := linux-2.6
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index 2d3f90afe5f1..bc7405585def 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -16,7 +16,6 @@
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/mm.h>
-#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/blkdev.h>
@@ -24,8 +23,25 @@
#include "time.h"
#include "kmem.h"
-#define MAX_VMALLOCS 6
-#define MAX_SLAB_SIZE 0x20000
+/*
+ * Greedy allocation. May fail and may return vmalloced memory.
+ *
+ * Must be freed using kmem_free_large.
+ */
+void *
+kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
+{
+ void *ptr;
+ size_t kmsize = maxsize;
+
+ while (!(ptr = kmem_zalloc_large(kmsize))) {
+ if ((kmsize >>= 1) <= minsize)
+ kmsize = minsize;
+ }
+ if (ptr)
+ *size = kmsize;
+ return ptr;
+}
void *
kmem_alloc(size_t size, unsigned int __nocast flags)
@@ -34,19 +50,8 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
gfp_t lflags = kmem_flags_convert(flags);
void *ptr;
-#ifdef DEBUG
- if (unlikely(!(flags & KM_LARGE) && (size > PAGE_SIZE))) {
- printk(KERN_WARNING "Large %s attempt, size=%ld\n",
- __func__, (long)size);
- dump_stack();
- }
-#endif
-
do {
- if (size < MAX_SLAB_SIZE || retries > MAX_VMALLOCS)
- ptr = kmalloc(size, lflags);
- else
- ptr = __vmalloc(size, lflags, PAGE_KERNEL);
+ ptr = kmalloc(size, lflags);
if (ptr || (flags & (KM_MAYFAIL|KM_NOSLEEP)))
return ptr;
if (!(++retries % 100))
@@ -68,27 +73,6 @@ kmem_zalloc(size_t size, unsigned int __nocast flags)
return ptr;
}
-void *
-kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
- unsigned int __nocast flags)
-{
- void *ptr;
- size_t kmsize = maxsize;
- unsigned int kmflags = (flags & ~KM_SLEEP) | KM_NOSLEEP;
-
- while (!(ptr = kmem_zalloc(kmsize, kmflags))) {
- if ((kmsize <= minsize) && (flags & KM_NOSLEEP))
- break;
- if ((kmsize >>= 1) <= minsize) {
- kmsize = minsize;
- kmflags = flags;
- }
- }
- if (ptr)
- *size = kmsize;
- return ptr;
-}
-
void
kmem_free(const void *ptr)
{
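As a usage sketch of the reworked greedy allocator above: the caller passes a minimum and maximum size, receives the size actually obtained through *size, and must release the memory with kmem_free_large() because it now always comes from vmalloc. The buffer name and sizes below are assumptions for illustration, not taken from this patch:

	size_t irbsize;
	void *irbuf;

	/* try for four pages, fall back as far as one page */
	irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
	if (!irbuf)
		return ENOMEM;
	/* use up to irbsize bytes at irbuf */
	kmem_free_large(irbuf);		/* not kmem_free(): this memory is vmalloc'd */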
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 179cbd630f69..f7c8f7a9ea6d 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/vmalloc.h>
/*
* General memory allocation interfaces
@@ -30,7 +31,6 @@
#define KM_NOSLEEP 0x0002u
#define KM_NOFS 0x0004u
#define KM_MAYFAIL 0x0008u
-#define KM_LARGE 0x0010u
/*
* We use a special process flag to avoid recursive callbacks into
@@ -42,7 +42,7 @@ kmem_flags_convert(unsigned int __nocast flags)
{
gfp_t lflags;
- BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_LARGE));
+ BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL));
if (flags & KM_NOSLEEP) {
lflags = GFP_ATOMIC | __GFP_NOWARN;
@@ -56,10 +56,25 @@ kmem_flags_convert(unsigned int __nocast flags)
extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast);
-extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast);
extern void kmem_free(const void *);
+static inline void *kmem_zalloc_large(size_t size)
+{
+ void *ptr;
+
+ ptr = vmalloc(size);
+ if (ptr)
+ memset(ptr, 0, size);
+ return ptr;
+}
+static inline void kmem_free_large(void *ptr)
+{
+ vfree(ptr);
+}
+
+extern void *kmem_zalloc_greedy(size_t *, size_t, size_t);
+
/*
* Zone interfaces
*/
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 883ca5ab8af5..bf85bbe4a9ae 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -106,7 +106,7 @@ xfs_get_acl(struct inode *inode, int type)
struct posix_acl *acl;
struct xfs_acl *xfs_acl;
int len = sizeof(struct xfs_acl);
- char *ea_name;
+ unsigned char *ea_name;
int error;
acl = get_cached_acl(inode, type);
@@ -133,7 +133,8 @@ xfs_get_acl(struct inode *inode, int type)
if (!xfs_acl)
return ERR_PTR(-ENOMEM);
- error = -xfs_attr_get(ip, ea_name, (char *)xfs_acl, &len, ATTR_ROOT);
+ error = -xfs_attr_get(ip, ea_name, (unsigned char *)xfs_acl,
+ &len, ATTR_ROOT);
if (error) {
/*
* If the attribute doesn't exist make sure we have a negative
@@ -162,7 +163,7 @@ STATIC int
xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
{
struct xfs_inode *ip = XFS_I(inode);
- char *ea_name;
+ unsigned char *ea_name;
int error;
if (S_ISLNK(inode->i_mode))
@@ -194,7 +195,7 @@ xfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
(sizeof(struct xfs_acl_entry) *
(XFS_ACL_MAX_ENTRIES - acl->a_count));
- error = -xfs_attr_set(ip, ea_name, (char *)xfs_acl,
+ error = -xfs_attr_set(ip, ea_name, (unsigned char *)xfs_acl,
len, ATTR_ROOT);
kfree(xfs_acl);
@@ -262,7 +263,7 @@ xfs_set_mode(struct inode *inode, mode_t mode)
}
static int
-xfs_acl_exists(struct inode *inode, char *name)
+xfs_acl_exists(struct inode *inode, unsigned char *name)
{
int len = sizeof(struct xfs_acl);
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 77b8be81c769..d50df3a8101c 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -33,6 +33,7 @@
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
+#include <linux/list_sort.h>
#include "xfs_sb.h"
#include "xfs_inum.h"
@@ -1051,22 +1052,30 @@ xfs_buf_ioerror(
}
int
-xfs_bawrite(
- void *mp,
+xfs_bwrite(
+ struct xfs_mount *mp,
struct xfs_buf *bp)
{
- trace_xfs_buf_bawrite(bp, _RET_IP_);
+ int iowait = (bp->b_flags & XBF_ASYNC) == 0;
+ int error = 0;
- ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
+ bp->b_strat = xfs_bdstrat_cb;
+ bp->b_mount = mp;
+ bp->b_flags |= XBF_WRITE;
+ if (!iowait)
+ bp->b_flags |= _XBF_RUN_QUEUES;
xfs_buf_delwri_dequeue(bp);
+ xfs_buf_iostrategy(bp);
- bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
- bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
+ if (iowait) {
+ error = xfs_buf_iowait(bp);
+ if (error)
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
+ xfs_buf_relse(bp);
+ }
- bp->b_mount = mp;
- bp->b_strat = xfs_bdstrat_cb;
- return xfs_bdstrat_cb(bp);
+ return error;
}
void
@@ -1085,6 +1094,126 @@ xfs_bdwrite(
xfs_buf_delwri_queue(bp, 1);
}
+/*
+ * Called when we want to stop a buffer from getting written or read.
+ * We attach the EIO error, muck with its flags, and call biodone
+ * so that the proper iodone callbacks get called.
+ */
+STATIC int
+xfs_bioerror(
+ xfs_buf_t *bp)
+{
+#ifdef XFSERRORDEBUG
+ ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
+#endif
+
+ /*
+ * No need to wait until the buffer is unpinned, we aren't flushing it.
+ */
+ XFS_BUF_ERROR(bp, EIO);
+
+ /*
+ * We're calling biodone, so delete XBF_DONE flag.
+ */
+ XFS_BUF_UNREAD(bp);
+ XFS_BUF_UNDELAYWRITE(bp);
+ XFS_BUF_UNDONE(bp);
+ XFS_BUF_STALE(bp);
+
+ XFS_BUF_CLR_BDSTRAT_FUNC(bp);
+ xfs_biodone(bp);
+
+ return EIO;
+}
+
+/*
+ * Same as xfs_bioerror, except that we are releasing the buffer
+ * here ourselves, and avoiding the biodone call.
+ * This is meant for userdata errors; metadata bufs come with
+ * iodone functions attached, so that we can track down errors.
+ */
+STATIC int
+xfs_bioerror_relse(
+ struct xfs_buf *bp)
+{
+ int64_t fl = XFS_BUF_BFLAGS(bp);
+ /*
+ * No need to wait until the buffer is unpinned.
+ * We aren't flushing it.
+ *
+ * chunkhold expects B_DONE to be set, whether
+ * we actually finish the I/O or not. We don't want to
+ * change that interface.
+ */
+ XFS_BUF_UNREAD(bp);
+ XFS_BUF_UNDELAYWRITE(bp);
+ XFS_BUF_DONE(bp);
+ XFS_BUF_STALE(bp);
+ XFS_BUF_CLR_IODONE_FUNC(bp);
+ XFS_BUF_CLR_BDSTRAT_FUNC(bp);
+ if (!(fl & XBF_ASYNC)) {
+ /*
+ * Mark b_error and B_ERROR _both_.
+ * Lots of chunkcache code assumes that.
+ * There's no reason to mark error for
+ * ASYNC buffers.
+ */
+ XFS_BUF_ERROR(bp, EIO);
+ XFS_BUF_FINISH_IOWAIT(bp);
+ } else {
+ xfs_buf_relse(bp);
+ }
+
+ return EIO;
+}
+
+
+/*
+ * All xfs metadata buffers except log state machine buffers
+ * get this attached as their b_bdstrat callback function.
+ * This is so that we can catch a buffer
+ * after prematurely unpinning it to forcibly shutdown the filesystem.
+ */
+int
+xfs_bdstrat_cb(
+ struct xfs_buf *bp)
+{
+ if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
+ /*
+ * Metadata write that didn't get logged but
+ * written delayed anyway. These aren't associated
+ * with a transaction, and can be ignored.
+ */
+ if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
+ return xfs_bioerror_relse(bp);
+ else
+ return xfs_bioerror(bp);
+ }
+
+ xfs_buf_iorequest(bp);
+ return 0;
+}
+
+/*
+ * Wrapper around bdstrat so that we can stop data from going to disk in case
+ * we are shutting down the filesystem. Typically user data goes thru this
+ * path; one of the exceptions is the superblock.
+ */
+void
+xfsbdstrat(
+ struct xfs_mount *mp,
+ struct xfs_buf *bp)
+{
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
+ xfs_bioerror_relse(bp);
+ return;
+ }
+
+ xfs_buf_iorequest(bp);
+}
+
STATIC void
_xfs_buf_ioend(
xfs_buf_t *bp,
@@ -1296,7 +1425,7 @@ xfs_buf_iomove(
xfs_buf_t *bp, /* buffer to process */
size_t boff, /* starting buffer offset */
size_t bsize, /* length to copy */
- caddr_t data, /* data address */
+ void *data, /* data address */
xfs_buf_rw_t mode) /* read/write/zero flag */
{
size_t bend, cpoff, csize;
@@ -1378,8 +1507,8 @@ xfs_alloc_bufhash(
btp->bt_hashshift = external ? 3 : 8; /* 8 or 256 buckets */
btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
- btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
- sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
+ btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
+ sizeof(xfs_bufhash_t));
for (i = 0; i < (1 << btp->bt_hashshift); i++) {
spin_lock_init(&btp->bt_hash[i].bh_lock);
INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
@@ -1390,7 +1519,7 @@ STATIC void
xfs_free_bufhash(
xfs_buftarg_t *btp)
{
- kmem_free(btp->bt_hash);
+ kmem_free_large(btp->bt_hash);
btp->bt_hash = NULL;
}
@@ -1595,6 +1724,11 @@ xfs_buf_delwri_queue(
list_del(&bp->b_list);
}
+ if (list_empty(dwq)) {
+ /* start xfsbufd as it is about to have something to do */
+ wake_up_process(bp->b_target->bt_task);
+ }
+
bp->b_flags |= _XBF_DELWRI_Q;
list_add_tail(&bp->b_list, dwq);
bp->b_queuetime = jiffies;
@@ -1626,6 +1760,35 @@ xfs_buf_delwri_dequeue(
trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}
+/*
+ * If a delwri buffer needs to be pushed before it has aged out, then promote
+ * it to the head of the delwri queue so that it will be flushed on the next
+ * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
+ * than the age currently needed to flush the buffer. Hence the next time the
+ * xfsbufd sees it is guaranteed to be considered old enough to flush.
+ */
+void
+xfs_buf_delwri_promote(
+ struct xfs_buf *bp)
+{
+ struct xfs_buftarg *btp = bp->b_target;
+ long age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;
+
+ ASSERT(bp->b_flags & XBF_DELWRI);
+ ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+
+ /*
+ * Check the buffer age before locking the delayed write queue as we
+ * don't need to promote buffers that are already past the flush age.
+ */
+ if (bp->b_queuetime < jiffies - age)
+ return;
+ bp->b_queuetime = jiffies - age;
+ spin_lock(&btp->bt_delwrite_lock);
+ list_move(&bp->b_list, &btp->bt_delwrite_queue);
+ spin_unlock(&btp->bt_delwrite_lock);
+}
+
STATIC void
xfs_buf_runall_queues(
struct workqueue_struct *queue)
@@ -1644,6 +1807,8 @@ xfsbufd_wakeup(
list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
continue;
+ if (list_empty(&btp->bt_delwrite_queue))
+ continue;
set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
wake_up_process(btp->bt_task);
}
@@ -1694,20 +1859,53 @@ xfs_buf_delwri_split(
}
+/*
+ * Compare function is more complex than it needs to be because
+ * the return value is only 32 bits and we are doing comparisons
+ * on 64 bit values
+ */
+static int
+xfs_buf_cmp(
+ void *priv,
+ struct list_head *a,
+ struct list_head *b)
+{
+ struct xfs_buf *ap = container_of(a, struct xfs_buf, b_list);
+ struct xfs_buf *bp = container_of(b, struct xfs_buf, b_list);
+ xfs_daddr_t diff;
+
+ diff = ap->b_bn - bp->b_bn;
+ if (diff < 0)
+ return -1;
+ if (diff > 0)
+ return 1;
+ return 0;
+}
+
+void
+xfs_buf_delwri_sort(
+ xfs_buftarg_t *target,
+ struct list_head *list)
+{
+ list_sort(NULL, list, xfs_buf_cmp);
+}
+
STATIC int
xfsbufd(
void *data)
{
- struct list_head tmp;
- xfs_buftarg_t *target = (xfs_buftarg_t *)data;
- int count;
- xfs_buf_t *bp;
+ xfs_buftarg_t *target = (xfs_buftarg_t *)data;
current->flags |= PF_MEMALLOC;
set_freezable();
do {
+ long age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
+ long tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
+ int count = 0;
+ struct list_head tmp;
+
if (unlikely(freezing(current))) {
set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
refrigerator();
@@ -1715,17 +1913,16 @@ xfsbufd(
clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
}
- schedule_timeout_interruptible(
- xfs_buf_timer_centisecs * msecs_to_jiffies(10));
-
- xfs_buf_delwri_split(target, &tmp,
- xfs_buf_age_centisecs * msecs_to_jiffies(10));
+ /* sleep for a long time if there is nothing to do. */
+ if (list_empty(&target->bt_delwrite_queue))
+ tout = MAX_SCHEDULE_TIMEOUT;
+ schedule_timeout_interruptible(tout);
- count = 0;
+ xfs_buf_delwri_split(target, &tmp, age);
+ list_sort(NULL, &tmp, xfs_buf_cmp);
while (!list_empty(&tmp)) {
- bp = list_entry(tmp.next, xfs_buf_t, b_list);
- ASSERT(target == bp->b_target);
-
+ struct xfs_buf *bp;
+ bp = list_first_entry(&tmp, struct xfs_buf, b_list);
list_del_init(&bp->b_list);
xfs_buf_iostrategy(bp);
count++;
@@ -1751,42 +1948,45 @@ xfs_flush_buftarg(
xfs_buftarg_t *target,
int wait)
{
- struct list_head tmp;
- xfs_buf_t *bp, *n;
+ xfs_buf_t *bp;
int pincount = 0;
+ LIST_HEAD(tmp_list);
+ LIST_HEAD(wait_list);
xfs_buf_runall_queues(xfsconvertd_workqueue);
xfs_buf_runall_queues(xfsdatad_workqueue);
xfs_buf_runall_queues(xfslogd_workqueue);
set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
- pincount = xfs_buf_delwri_split(target, &tmp, 0);
+ pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
/*
- * Dropped the delayed write list lock, now walk the temporary list
+ * Dropped the delayed write list lock, now walk the temporary list.
+ * All I/O is issued async and then if we need to wait for completion
+ * we do that after issuing all the IO.
*/
- list_for_each_entry_safe(bp, n, &tmp, b_list) {
+ list_sort(NULL, &tmp_list, xfs_buf_cmp);
+ while (!list_empty(&tmp_list)) {
+ bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
ASSERT(target == bp->b_target);
- if (wait)
+ list_del_init(&bp->b_list);
+ if (wait) {
bp->b_flags &= ~XBF_ASYNC;
- else
- list_del_init(&bp->b_list);
-
+ list_add(&bp->b_list, &wait_list);
+ }
xfs_buf_iostrategy(bp);
}
- if (wait)
+ if (wait) {
+ /* Expedite and wait for IO to complete. */
blk_run_address_space(target->bt_mapping);
+ while (!list_empty(&wait_list)) {
+ bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
- /*
- * Remaining list items must be flushed before returning
- */
- while (!list_empty(&tmp)) {
- bp = list_entry(tmp.next, xfs_buf_t, b_list);
-
- list_del_init(&bp->b_list);
- xfs_iowait(bp);
- xfs_buf_relse(bp);
+ list_del_init(&bp->b_list);
+ xfs_iowait(bp);
+ xfs_buf_relse(bp);
+ }
}
return pincount;
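The new xfs_bwrite() above absorbs the old XFS_bwrite() behaviour: it waits for completion and releases the buffer only when XBF_ASYNC is clear, and a synchronous write error already forces a metadata-IO shutdown before returning. A minimal sketch of a synchronous caller, assuming bp is a locked buffer the caller owns:

	int error;

	bp->b_flags &= ~XBF_ASYNC;	/* request a waiting, synchronous write */
	error = xfs_bwrite(mp, bp);	/* releases bp on this path */
	if (error)
		return error;		/* shutdown was already triggered inside */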
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index a34c7b54822d..386e7361e50e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -232,13 +232,17 @@ extern void xfs_buf_lock(xfs_buf_t *);
extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
-extern int xfs_bawrite(void *mp, xfs_buf_t *bp);
+extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
extern void xfs_bdwrite(void *mp, xfs_buf_t *bp);
+
+extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
+extern int xfs_bdstrat_cb(struct xfs_buf *);
+
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern int xfs_buf_iorequest(xfs_buf_t *);
extern int xfs_buf_iowait(xfs_buf_t *);
-extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
+extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
xfs_buf_rw_t);
static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
@@ -261,6 +265,7 @@ extern int xfs_buf_ispin(xfs_buf_t *);
/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
+extern void xfs_buf_delwri_promote(xfs_buf_t *);
/* Buffer Daemon Setup Routines */
extern int xfs_buf_init(void);
@@ -270,33 +275,19 @@ extern void xfs_buf_terminate(void);
({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
-#define XFS_B_ASYNC XBF_ASYNC
-#define XFS_B_DELWRI XBF_DELWRI
-#define XFS_B_READ XBF_READ
-#define XFS_B_WRITE XBF_WRITE
-#define XFS_B_STALE XBF_STALE
-
-#define XFS_BUF_TRYLOCK XBF_TRYLOCK
-#define XFS_INCORE_TRYLOCK XBF_TRYLOCK
-#define XFS_BUF_LOCK XBF_LOCK
-#define XFS_BUF_MAPPED XBF_MAPPED
-
-#define BUF_BUSY XBF_DONT_BLOCK
-
#define XFS_BUF_BFLAGS(bp) ((bp)->b_flags)
#define XFS_BUF_ZEROFLAGS(bp) ((bp)->b_flags &= \
~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI|XBF_ORDERED))
-#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XFS_B_STALE)
-#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XFS_B_STALE)
-#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XFS_B_STALE)
+#define XFS_BUF_STALE(bp) ((bp)->b_flags |= XBF_STALE)
+#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
+#define XFS_BUF_ISSTALE(bp) ((bp)->b_flags & XBF_STALE)
#define XFS_BUF_SUPER_STALE(bp) do { \
XFS_BUF_STALE(bp); \
xfs_buf_delwri_dequeue(bp); \
XFS_BUF_DONE(bp); \
} while (0)
-#define XFS_BUF_MANAGE XBF_FS_MANAGED
#define XFS_BUF_UNMANAGE(bp) ((bp)->b_flags &= ~XBF_FS_MANAGED)
#define XFS_BUF_DELAYWRITE(bp) ((bp)->b_flags |= XBF_DELWRI)
@@ -385,31 +376,11 @@ static inline void xfs_buf_relse(xfs_buf_t *bp)
#define xfs_biomove(bp, off, len, data, rw) \
xfs_buf_iomove((bp), (off), (len), (data), \
- ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
+ ((rw) == XBF_WRITE) ? XBRW_WRITE : XBRW_READ)
#define xfs_biozero(bp, off, len) \
xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
-
-static inline int XFS_bwrite(xfs_buf_t *bp)
-{
- int iowait = (bp->b_flags & XBF_ASYNC) == 0;
- int error = 0;
-
- if (!iowait)
- bp->b_flags |= _XBF_RUN_QUEUES;
-
- xfs_buf_delwri_dequeue(bp);
- xfs_buf_iostrategy(bp);
- if (iowait) {
- error = xfs_buf_iowait(bp);
- xfs_buf_relse(bp);
- }
- return error;
-}
-
-#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
-
#define xfs_iowait(bp) xfs_buf_iowait(bp)
#define xfs_baread(target, rablkno, ralen) \
@@ -424,6 +395,7 @@ extern void xfs_free_buftarg(struct xfs_mount *, struct xfs_buftarg *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
+
#ifdef CONFIG_KDB_MODULES
extern struct list_head *xfs_get_buftarg_list(void);
#endif
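xfs_buf_delwri_promote(), declared above, lets log item pushbuf handlers stop issuing I/O themselves: they look the buffer up with a trylock and promote it so the next xfsbufd pass writes it out, which is what the dquot changes later in this patch do. Condensed, with blkno and len standing in for the item's block number and length:

	bp = xfs_incore(mp->m_ddev_targp, blkno, len, XBF_TRYLOCK);
	if (!bp)
		return;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);	/* flushed on the next xfsbufd run */
	xfs_buf_relse(bp);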
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index 8f4d70789e3f..846b75aeb2ab 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -226,8 +226,8 @@ xfs_fs_nfs_commit_metadata(
xfs_ilock(ip, XFS_ILOCK_SHARED);
if (xfs_ipincount(ip)) {
- error = _xfs_log_force(mp, ip->i_itemp->ili_last_lsn,
- XFS_LOG_FORCE | XFS_LOG_SYNC, NULL);
+ error = _xfs_log_force_lsn(mp, ip->i_itemp->ili_last_lsn,
+ XFS_LOG_SYNC, NULL);
}
xfs_iunlock(ip, XFS_ILOCK_SHARED);
diff --git a/fs/xfs/linux-2.6/xfs_fs_subr.c b/fs/xfs/linux-2.6/xfs_fs_subr.c
index 7501b85fd860..b6918d76bc7b 100644
--- a/fs/xfs/linux-2.6/xfs_fs_subr.c
+++ b/fs/xfs/linux-2.6/xfs_fs_subr.c
@@ -79,7 +79,7 @@ xfs_flush_pages(
xfs_iflags_clear(ip, XFS_ITRUNCATED);
ret = -filemap_fdatawrite(mapping);
}
- if (flags & XFS_B_ASYNC)
+ if (flags & XBF_ASYNC)
return ret;
ret2 = xfs_wait_on_pages(ip, first, last);
if (!ret)
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index a034cf624437..4ea1ee18aded 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -447,12 +447,12 @@ xfs_attrlist_by_handle(
int
xfs_attrmulti_attr_get(
struct inode *inode,
- char *name,
- char __user *ubuf,
+ unsigned char *name,
+ unsigned char __user *ubuf,
__uint32_t *len,
__uint32_t flags)
{
- char *kbuf;
+ unsigned char *kbuf;
int error = EFAULT;
if (*len > XATTR_SIZE_MAX)
@@ -476,12 +476,12 @@ xfs_attrmulti_attr_get(
int
xfs_attrmulti_attr_set(
struct inode *inode,
- char *name,
- const char __user *ubuf,
+ unsigned char *name,
+ const unsigned char __user *ubuf,
__uint32_t len,
__uint32_t flags)
{
- char *kbuf;
+ unsigned char *kbuf;
int error = EFAULT;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
@@ -501,7 +501,7 @@ xfs_attrmulti_attr_set(
int
xfs_attrmulti_attr_remove(
struct inode *inode,
- char *name,
+ unsigned char *name,
__uint32_t flags)
{
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
@@ -519,7 +519,7 @@ xfs_attrmulti_by_handle(
xfs_fsop_attrmulti_handlereq_t am_hreq;
struct dentry *dentry;
unsigned int i, size;
- char *attr_name;
+ unsigned char *attr_name;
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
@@ -547,7 +547,7 @@ xfs_attrmulti_by_handle(
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
- ops[i].am_error = strncpy_from_user(attr_name,
+ ops[i].am_error = strncpy_from_user((char *)attr_name,
ops[i].am_attrname, MAXNAMELEN);
if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
error = -ERANGE;
@@ -1431,6 +1431,9 @@ xfs_file_ioctl(
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
+ return -XFS_ERROR(EROFS);
+
if (copy_from_user(&inout, arg, sizeof(inout)))
return -XFS_ERROR(EFAULT);
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h
index 7bd7c6afc1eb..d56173b34a2a 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.h
+++ b/fs/xfs/linux-2.6/xfs_ioctl.h
@@ -45,23 +45,23 @@ xfs_readlink_by_handle(
extern int
xfs_attrmulti_attr_get(
struct inode *inode,
- char *name,
- char __user *ubuf,
+ unsigned char *name,
+ unsigned char __user *ubuf,
__uint32_t *len,
__uint32_t flags);
extern int
- xfs_attrmulti_attr_set(
+xfs_attrmulti_attr_set(
struct inode *inode,
- char *name,
- const char __user *ubuf,
+ unsigned char *name,
+ const unsigned char __user *ubuf,
__uint32_t len,
__uint32_t flags);
extern int
xfs_attrmulti_attr_remove(
struct inode *inode,
- char *name,
+ unsigned char *name,
__uint32_t flags);
extern struct dentry *
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index be1527b1670c..0bf6d61f0528 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -411,7 +411,7 @@ xfs_compat_attrmulti_by_handle(
compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
struct dentry *dentry;
unsigned int i, size;
- char *attr_name;
+ unsigned char *attr_name;
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
@@ -440,7 +440,7 @@ xfs_compat_attrmulti_by_handle(
error = 0;
for (i = 0; i < am_hreq.opcount; i++) {
- ops[i].am_error = strncpy_from_user(attr_name,
+ ops[i].am_error = strncpy_from_user((char *)attr_name,
compat_ptr(ops[i].am_attrname),
MAXNAMELEN);
if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 225946012d0b..e8566bbf0f00 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -140,10 +140,10 @@ xfs_init_security(
struct xfs_inode *ip = XFS_I(inode);
size_t length;
void *value;
- char *name;
+ unsigned char *name;
int error;
- error = security_inode_init_security(inode, dir, &name,
+ error = security_inode_init_security(inode, dir, (char **)&name,
&value, &length);
if (error) {
if (error == -EOPNOTSUPP)
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 0d32457abef1..eac6f80d786d 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -630,18 +630,9 @@ start:
* by root. This keeps people from modifying setuid and
* setgid binaries.
*/
-
- if (((xip->i_d.di_mode & S_ISUID) ||
- ((xip->i_d.di_mode & (S_ISGID | S_IXGRP)) ==
- (S_ISGID | S_IXGRP))) &&
- !capable(CAP_FSETID)) {
- error = xfs_write_clear_setuid(xip);
- if (likely(!error))
- error = -file_remove_suid(file);
- if (unlikely(error)) {
- goto out_unlock_internal;
- }
- }
+ error = -file_remove_suid(file);
+ if (unlikely(error))
+ goto out_unlock_internal;
/* We can write back this queue in page reclaim */
current->backing_dev_info = mapping->backing_dev_info;
@@ -784,53 +775,6 @@ write_retry:
}
/*
- * All xfs metadata buffers except log state machine buffers
- * get this attached as their b_bdstrat callback function.
- * This is so that we can catch a buffer
- * after prematurely unpinning it to forcibly shutdown the filesystem.
- */
-int
-xfs_bdstrat_cb(struct xfs_buf *bp)
-{
- if (XFS_FORCED_SHUTDOWN(bp->b_mount)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- /*
- * Metadata write that didn't get logged but
- * written delayed anyway. These aren't associated
- * with a transaction, and can be ignored.
- */
- if (XFS_BUF_IODONE_FUNC(bp) == NULL &&
- (XFS_BUF_ISREAD(bp)) == 0)
- return (xfs_bioerror_relse(bp));
- else
- return (xfs_bioerror(bp));
- }
-
- xfs_buf_iorequest(bp);
- return 0;
-}
-
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem. Typically user data goes thru this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- ASSERT(mp);
- if (!XFS_FORCED_SHUTDOWN(mp)) {
- xfs_buf_iorequest(bp);
- return;
- }
-
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- xfs_bioerror_relse(bp);
-}
-
-/*
* If the underlying (data/log/rt) device is readonly, there are some
* operations that cannot proceed.
*/
diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h
index d1f7789c7ffb..342ae8c0d011 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.h
+++ b/fs/xfs/linux-2.6/xfs_lrw.h
@@ -22,9 +22,6 @@ struct xfs_mount;
struct xfs_inode;
struct xfs_buf;
-/* errors from xfsbdstrat() must be extracted from the buffer */
-extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-extern int xfs_bdstrat_cb(struct xfs_buf *);
extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
extern int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t);
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 09783cc444ac..25ea2408118f 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -877,12 +877,11 @@ xfsaild(
{
struct xfs_ail *ailp = data;
xfs_lsn_t last_pushed_lsn = 0;
- long tout = 0;
+ long tout = 0; /* milliseconds */
while (!kthread_should_stop()) {
- if (tout)
- schedule_timeout_interruptible(msecs_to_jiffies(tout));
- tout = 1000;
+ schedule_timeout_interruptible(tout ?
+ msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT);
/* swsusp */
try_to_freeze();
@@ -954,16 +953,14 @@ xfs_fs_destroy_inode(
ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
/*
- * If we have nothing to flush with this inode then complete the
- * teardown now, otherwise delay the flush operation.
+ * We always use background reclaim here because even if the
+ * inode is clean, it still may be under IO and hence we have
+ * to take the flush lock. The background reclaim path handles
+ * this more efficiently than we can here, so simply let background
+ * reclaim tear down all inodes.
*/
- if (!xfs_inode_clean(ip)) {
- xfs_inode_set_reclaim_tag(ip);
- return;
- }
-
out_reclaim:
- xfs_ireclaim(ip);
+ xfs_inode_set_reclaim_tag(ip);
}
/*
@@ -1024,12 +1021,45 @@ xfs_fs_dirty_inode(
XFS_I(inode)->i_update_core = 1;
}
-/*
- * Attempt to flush the inode, this will actually fail
- * if the inode is pinned, but we dirty the inode again
- * at the point when it is unpinned after a log write,
- * since this is when the inode itself becomes flushable.
- */
+STATIC int
+xfs_log_inode(
+ struct xfs_inode *ip)
+{
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_trans *tp;
+ int error;
+
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
+ error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
+
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ /* we need to return with the lock held shared */
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ return error;
+ }
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+ /*
+ * Note - it's possible that we might have pushed ourselves out of the
+ * way during trans_reserve which would flush the inode. But there's
+ * no guarantee that the inode buffer has actually gone out yet (it's
+ * delwri). Plus the buffer could be pinned anyway if it's part of
+ * an inode in another recent transaction. So we play it safe and
+ * fire off the transaction anyway.
+ */
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+ xfs_trans_ihold(tp, ip);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ xfs_trans_set_sync(tp);
+ error = xfs_trans_commit(tp, 0);
+ xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
+
+ return error;
+}
+
STATIC int
xfs_fs_write_inode(
struct inode *inode,
@@ -1037,7 +1067,7 @@ xfs_fs_write_inode(
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- int error = 0;
+ int error = EAGAIN;
xfs_itrace_entry(ip);
@@ -1048,35 +1078,55 @@ xfs_fs_write_inode(
error = xfs_wait_on_pages(ip, 0, -1);
if (error)
goto out;
- }
- /*
- * Bypass inodes which have already been cleaned by
- * the inode flush clustering code inside xfs_iflush
- */
- if (xfs_inode_clean(ip))
- goto out;
-
- /*
- * We make this non-blocking if the inode is contended, return
- * EAGAIN to indicate to the caller that they did not succeed.
- * This prevents the flush path from blocking on inodes inside
- * another operation right now, they get caught later by xfs_sync.
- */
- if (sync) {
+ /*
+ * Make sure the inode has hit stable storage. By using the
+ * log and the fsync transactions we reduce the IOs we have
+ * to do here from two (log and inode) to just the log.
+ *
+ * Note: We still need to do a delwri write of the inode after
+ * this to flush it to the backing buffer so that bulkstat
+ * works properly if this is the first time the inode has been
+ * written. Because we hold the ilock atomically over the
+ * transaction commit and the inode flush we are guaranteed
+ * that the inode is not pinned when it returns. If the flush
+ * lock is already held, then the inode has already been
+ * flushed once and we don't need to flush it again. Hence
+ * the code will only flush the inode if it isn't already
+ * being flushed.
+ */
xfs_ilock(ip, XFS_ILOCK_SHARED);
- xfs_iflock(ip);
-
- error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
+ if (ip->i_update_core) {
+ error = xfs_log_inode(ip);
+ if (error)
+ goto out_unlock;
+ }
} else {
- error = EAGAIN;
+ /*
+ * We make this non-blocking if the inode is contended, return
+ * EAGAIN to indicate to the caller that they did not succeed.
+ * This prevents the flush path from blocking on inodes inside
+ * another operation right now, they get caught later by xfs_sync.
+ */
if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
goto out;
- if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
- goto out_unlock;
+ }
+
+ if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip))
+ goto out_unlock;
- error = xfs_iflush(ip, XFS_IFLUSH_ASYNC_NOBLOCK);
+ /*
+ * Now we have the flush lock and the inode is not pinned, we can check
+ * if the inode is really clean as we know that there are no pending
+ * transaction completions, it is not waiting on the delayed write
+ * queue and there is no IO in progress.
+ */
+ if (xfs_inode_clean(ip)) {
+ xfs_ifunlock(ip);
+ error = 0;
+ goto out_unlock;
}
+ error = xfs_iflush(ip, 0);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -1259,6 +1309,29 @@ xfs_fs_statfs(
return 0;
}
+STATIC void
+xfs_save_resvblks(struct xfs_mount *mp)
+{
+ __uint64_t resblks = 0;
+
+ mp->m_resblks_save = mp->m_resblks;
+ xfs_reserve_blocks(mp, &resblks, NULL);
+}
+
+STATIC void
+xfs_restore_resvblks(struct xfs_mount *mp)
+{
+ __uint64_t resblks;
+
+ if (mp->m_resblks_save) {
+ resblks = mp->m_resblks_save;
+ mp->m_resblks_save = 0;
+ } else
+ resblks = xfs_default_resblks(mp);
+
+ xfs_reserve_blocks(mp, &resblks, NULL);
+}
+
STATIC int
xfs_fs_remount(
struct super_block *sb,
@@ -1338,11 +1411,27 @@ xfs_fs_remount(
}
mp->m_update_flags = 0;
}
+
+ /*
+ * Fill out the reserve pool if it is empty. Use the stashed
+ * value if it is non-zero, otherwise go with the default.
+ */
+ xfs_restore_resvblks(mp);
}
/* rw -> ro */
if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+ /*
+ * After we have synced the data but before we sync the
+ * metadata, we need to free up the reserve block pool so that
+ * the used block count in the superblock on disk is correct at
+ * the end of the remount. Stash the current reserve pool size
+ * so that if we get remounted rw, we can return it to the same
+ * size.
+ */
+
xfs_quiesce_data(mp);
+ xfs_save_resvblks(mp);
xfs_quiesce_attr(mp);
mp->m_flags |= XFS_MOUNT_RDONLY;
}
@@ -1361,11 +1450,22 @@ xfs_fs_freeze(
{
struct xfs_mount *mp = XFS_M(sb);
+ xfs_save_resvblks(mp);
xfs_quiesce_attr(mp);
return -xfs_fs_log_dummy(mp);
}
STATIC int
+xfs_fs_unfreeze(
+ struct super_block *sb)
+{
+ struct xfs_mount *mp = XFS_M(sb);
+
+ xfs_restore_resvblks(mp);
+ return 0;
+}
+
+STATIC int
xfs_fs_show_options(
struct seq_file *m,
struct vfsmount *mnt)
@@ -1587,6 +1687,7 @@ static const struct super_operations xfs_super_operations = {
.put_super = xfs_fs_put_super,
.sync_fs = xfs_fs_sync_fs,
.freeze_fs = xfs_fs_freeze,
+ .unfreeze_fs = xfs_fs_unfreeze,
.statfs = xfs_fs_statfs,
.remount_fs = xfs_fs_remount,
.show_options = xfs_fs_show_options,
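The reserve-pool handling added to xfs_super.c is symmetric: every path that quiesces the filesystem (freeze, rw to ro remount) stashes the pool size and drains it so the on-disk free-block count is correct, and every path that makes it writable again (unfreeze, ro to rw remount) refills it. Summarised from the hunks above:

	/* freeze / remount read-only */
	xfs_quiesce_data(mp);
	xfs_save_resvblks(mp);		/* stash m_resblks and drain the pool */
	xfs_quiesce_attr(mp);

	/* unfreeze / remount read-write */
	xfs_restore_resvblks(mp);	/* refill from the stash or the default */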
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 6fed97a8cd3e..a9f6d20aff41 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -65,7 +65,6 @@ xfs_inode_ag_lookup(
* as the tree is sparse and a gang lookup walks to find
* the number of objects requested.
*/
- read_lock(&pag->pag_ici_lock);
if (tag == XFS_ICI_NO_TAG) {
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
(void **)&ip, *first_index, 1);
@@ -74,7 +73,7 @@ xfs_inode_ag_lookup(
(void **)&ip, *first_index, 1, tag);
}
if (!nr_found)
- goto unlock;
+ return NULL;
/*
* Update the index for the next lookup. Catch overflows
@@ -84,25 +83,20 @@ xfs_inode_ag_lookup(
*/
*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
- goto unlock;
-
+ return NULL;
return ip;
-
-unlock:
- read_unlock(&pag->pag_ici_lock);
- return NULL;
}
STATIC int
xfs_inode_ag_walk(
struct xfs_mount *mp,
- xfs_agnumber_t ag,
+ struct xfs_perag *pag,
int (*execute)(struct xfs_inode *ip,
struct xfs_perag *pag, int flags),
int flags,
- int tag)
+ int tag,
+ int exclusive)
{
- struct xfs_perag *pag = &mp->m_perag[ag];
uint32_t first_index;
int last_error = 0;
int skipped;
@@ -114,10 +108,20 @@ restart:
int error = 0;
xfs_inode_t *ip;
+ if (exclusive)
+ write_lock(&pag->pag_ici_lock);
+ else
+ read_lock(&pag->pag_ici_lock);
ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
- if (!ip)
+ if (!ip) {
+ if (exclusive)
+ write_unlock(&pag->pag_ici_lock);
+ else
+ read_unlock(&pag->pag_ici_lock);
break;
+ }
+ /* execute releases pag->pag_ici_lock */
error = execute(ip, pag, flags);
if (error == EAGAIN) {
skipped++;
@@ -125,9 +129,8 @@ restart:
}
if (error)
last_error = error;
- /*
- * bail out if the filesystem is corrupted.
- */
+
+ /* bail out if the filesystem is corrupted. */
if (error == EFSCORRUPTED)
break;
@@ -137,8 +140,6 @@ restart:
delay(1);
goto restart;
}
-
- xfs_put_perag(mp, pag);
return last_error;
}
@@ -148,16 +149,24 @@ xfs_inode_ag_iterator(
int (*execute)(struct xfs_inode *ip,
struct xfs_perag *pag, int flags),
int flags,
- int tag)
+ int tag,
+ int exclusive)
{
int error = 0;
int last_error = 0;
xfs_agnumber_t ag;
for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
- if (!mp->m_perag[ag].pag_ici_init)
+ struct xfs_perag *pag;
+
+ pag = xfs_perag_get(mp, ag);
+ if (!pag->pag_ici_init) {
+ xfs_perag_put(pag);
continue;
- error = xfs_inode_ag_walk(mp, ag, execute, flags, tag);
+ }
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
+ exclusive);
+ xfs_perag_put(pag);
if (error) {
last_error = error;
if (error == EFSCORRUPTED)
@@ -174,30 +183,31 @@ xfs_sync_inode_valid(
struct xfs_perag *pag)
{
struct inode *inode = VFS_I(ip);
+ int error = EFSCORRUPTED;
/* nothing to sync during shutdown */
- if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- read_unlock(&pag->pag_ici_lock);
- return EFSCORRUPTED;
- }
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ goto out_unlock;
- /*
- * If we can't get a reference on the inode, it must be in reclaim.
- * Leave it for the reclaim code to flush. Also avoid inodes that
- * haven't been fully initialised.
- */
- if (!igrab(inode)) {
- read_unlock(&pag->pag_ici_lock);
- return ENOENT;
- }
- read_unlock(&pag->pag_ici_lock);
+ /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
+ error = ENOENT;
+ if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+ goto out_unlock;
+
+ /* If we can't grab the inode, it must be on its way to reclaim. */
+ if (!igrab(inode))
+ goto out_unlock;
- if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) {
+ if (is_bad_inode(inode)) {
IRELE(ip);
- return ENOENT;
+ goto out_unlock;
}
- return 0;
+ /* inode is valid */
+ error = 0;
+out_unlock:
+ read_unlock(&pag->pag_ici_lock);
+ return error;
}
STATIC int
@@ -224,7 +234,7 @@ xfs_sync_inode_data(
}
error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
- 0 : XFS_B_ASYNC, FI_NONE);
+ 0 : XBF_ASYNC, FI_NONE);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
out_wait:
@@ -260,8 +270,7 @@ xfs_sync_inode_attr(
goto out_unlock;
}
- error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
- XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);
+ error = xfs_iflush(ip, flags);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
@@ -282,14 +291,11 @@ xfs_sync_data(
ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
- XFS_ICI_NO_TAG);
+ XFS_ICI_NO_TAG, 0);
if (error)
return XFS_ERROR(error);
- xfs_log_force(mp, 0,
- (flags & SYNC_WAIT) ?
- XFS_LOG_FORCE | XFS_LOG_SYNC :
- XFS_LOG_FORCE);
+ xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
return 0;
}
@@ -304,7 +310,7 @@ xfs_sync_attr(
ASSERT((flags & ~SYNC_WAIT) == 0);
return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
- XFS_ICI_NO_TAG);
+ XFS_ICI_NO_TAG, 0);
}
STATIC int
@@ -315,10 +321,6 @@ xfs_commit_dummy_trans(
struct xfs_inode *ip = mp->m_rootip;
struct xfs_trans *tp;
int error;
- int log_flags = XFS_LOG_FORCE;
-
- if (flags & SYNC_WAIT)
- log_flags |= XFS_LOG_SYNC;
/*
* Put a dummy transaction in the log to tell recovery
@@ -340,11 +342,11 @@ xfs_commit_dummy_trans(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
/* the log force ensures this transaction is pushed to disk */
- xfs_log_force(mp, 0, log_flags);
+ xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
return error;
}
-int
+STATIC int
xfs_sync_fsdata(
struct xfs_mount *mp,
int flags)
@@ -360,7 +362,7 @@ xfs_sync_fsdata(
if (flags & SYNC_TRYLOCK) {
ASSERT(!(flags & SYNC_WAIT));
- bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
+ bp = xfs_getsb(mp, XBF_TRYLOCK);
if (!bp)
goto out;
@@ -380,7 +382,7 @@ xfs_sync_fsdata(
* become pinned in between there and here.
*/
if (XFS_BUF_ISPINNED(bp))
- xfs_log_force(mp, 0, XFS_LOG_FORCE);
+ xfs_log_force(mp, 0);
}
@@ -441,9 +443,6 @@ xfs_quiesce_data(
xfs_sync_data(mp, SYNC_WAIT);
xfs_qm_sync(mp, SYNC_WAIT);
- /* drop inode references pinned by filestreams */
- xfs_filestream_flush(mp);
-
/* write superblock and hoover up shutdown errors */
error = xfs_sync_fsdata(mp, SYNC_WAIT);
@@ -460,16 +459,18 @@ xfs_quiesce_fs(
{
int count = 0, pincount;
+ xfs_reclaim_inodes(mp, 0);
xfs_flush_buftarg(mp->m_ddev_targp, 0);
- xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
/*
* This loop must run at least twice. The first instance of the loop
* will flush most meta data but that will generate more meta data
* (typically directory updates). Which then must be flushed and
- * logged before we can write the unmount record.
+ * logged before we can write the unmount record. We also do sync
+ * reclaim of inodes to catch any that the above delwri flush skipped.
*/
do {
+ xfs_reclaim_inodes(mp, SYNC_WAIT);
xfs_sync_attr(mp, SYNC_WAIT);
pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
if (!pincount) {
@@ -568,7 +569,7 @@ xfs_flush_inodes(
igrab(inode);
xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
wait_for_completion(&completion);
- xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
+ xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}
/*
@@ -584,8 +585,8 @@ xfs_sync_worker(
int error;
if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
- xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
+ xfs_log_force(mp, 0);
+ xfs_reclaim_inodes(mp, 0);
/* dgc: errors ignored here */
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
@@ -664,60 +665,6 @@ xfs_syncd_stop(
kthread_stop(mp->m_sync_task);
}
-STATIC int
-xfs_reclaim_inode(
- xfs_inode_t *ip,
- int sync_mode)
-{
- xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
-
- /* The hash lock here protects a thread in xfs_iget_core from
- * racing with us on linking the inode back with a vnode.
- * Once we have the XFS_IRECLAIM flag set it will not touch
- * us.
- */
- write_lock(&pag->pag_ici_lock);
- spin_lock(&ip->i_flags_lock);
- if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
- !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
- spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
- return -EAGAIN;
- }
- __xfs_iflags_set(ip, XFS_IRECLAIM);
- spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
- xfs_put_perag(ip->i_mount, pag);
-
- /*
- * If the inode is still dirty, then flush it out. If the inode
- * is not in the AIL, then it will be OK to flush it delwri as
- * long as xfs_iflush() does not keep any references to the inode.
- * We leave that decision up to xfs_iflush() since it has the
- * knowledge of whether it's OK to simply do a delwri flush of
- * the inode or whether we need to wait until the inode is
- * pulled from the AIL.
- * We get the flush lock regardless, though, just to make sure
- * we don't free it while it is being flushed.
- */
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_iflock(ip);
-
- /*
- * In the case of a forced shutdown we rely on xfs_iflush() to
- * wait for the inode to be unpinned before returning an error.
- */
- if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
- /* synchronize with xfs_iflush_done */
- xfs_iflock(ip);
- xfs_ifunlock(ip);
- }
-
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_ireclaim(ip);
- return 0;
-}
-
void
__xfs_inode_set_reclaim_tag(
struct xfs_perag *pag,
@@ -737,16 +684,17 @@ void
xfs_inode_set_reclaim_tag(
xfs_inode_t *ip)
{
- xfs_mount_t *mp = ip->i_mount;
- xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
read_lock(&pag->pag_ici_lock);
spin_lock(&ip->i_flags_lock);
__xfs_inode_set_reclaim_tag(pag, ip);
__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
}
void
@@ -759,20 +707,145 @@ __xfs_inode_clear_reclaim_tag(
XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}
+/*
+ * Inodes in different states need to be treated differently, and the return
+ * value of xfs_iflush is not sufficient to get this right. The following table
+ * lists the inode states and the reclaim actions necessary for non-blocking
+ * reclaim:
+ *
+ *
+ * inode state iflush ret required action
+ * --------------- ---------- ---------------
+ * bad - reclaim
+ * shutdown EIO unpin and reclaim
+ * clean, unpinned 0 reclaim
+ * stale, unpinned 0 reclaim
+ * clean, pinned(*) 0 requeue
+ * stale, pinned EAGAIN requeue
+ * dirty, delwri ok 0 requeue
+ * dirty, delwri blocked EAGAIN requeue
+ * dirty, sync flush 0 reclaim
+ *
+ * (*) dgc: I don't think the clean, pinned state is possible but it gets
+ * handled anyway given the order of checks implemented.
+ *
+ * As can be seen from the table, the return value of xfs_iflush() is not
+ * sufficient to correctly decide the reclaim action here. The checks in
+ * xfs_iflush() might look like duplicates, but they are not.
+ *
+ * Also, because we get the flush lock first, we know that any inode that has
+ * been flushed delwri has had the flush completed by the time we check that
+ * the inode is clean. The clean inode check needs to be done before flushing
+ * the inode delwri otherwise we would loop forever requeuing clean inodes as
+ * we cannot tell apart a successful delwri flush and a clean inode from the
+ * return value of xfs_iflush().
+ *
+ * Note that because the inode is flushed delayed write by background
+ * writeback, the flush lock may already be held here and waiting on it can
+ * result in very long latencies. Hence for sync reclaims, where we wait on the
+ * flush lock, the caller should push out delayed write inodes first before
+ * trying to reclaim them to minimise the amount of time spent waiting. For
+ * background reclaim, we just requeue the inode for the next pass.
+ *
+ * Hence the order of actions after gaining the locks should be:
+ * bad => reclaim
+ * shutdown => unpin and reclaim
+ * pinned, delwri => requeue
+ * pinned, sync => unpin
+ * stale => reclaim
+ * clean => reclaim
+ * dirty, delwri => flush and requeue
+ * dirty, sync => flush, wait and reclaim
+ */
STATIC int
-xfs_reclaim_inode_now(
+xfs_reclaim_inode(
struct xfs_inode *ip,
struct xfs_perag *pag,
- int flags)
+ int sync_mode)
{
- /* ignore if already under reclaim */
- if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
- read_unlock(&pag->pag_ici_lock);
+ int error = 0;
+
+ /*
+ * The radix tree lock here protects a thread in xfs_iget from racing
+ * with us starting reclaim on the inode. Once we have the
+ * XFS_IRECLAIM flag set it will not touch us.
+ */
+ spin_lock(&ip->i_flags_lock);
+ ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+ if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
+ /* ignore as it is already under reclaim */
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
return 0;
}
- read_unlock(&pag->pag_ici_lock);
+ __xfs_iflags_set(ip, XFS_IRECLAIM);
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ if (!xfs_iflock_nowait(ip)) {
+ if (!(sync_mode & SYNC_WAIT))
+ goto out;
+ xfs_iflock(ip);
+ }
+
+ if (is_bad_inode(VFS_I(ip)))
+ goto reclaim;
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_iunpin_wait(ip);
+ goto reclaim;
+ }
+ if (xfs_ipincount(ip)) {
+ if (!(sync_mode & SYNC_WAIT)) {
+ xfs_ifunlock(ip);
+ goto out;
+ }
+ xfs_iunpin_wait(ip);
+ }
+ if (xfs_iflags_test(ip, XFS_ISTALE))
+ goto reclaim;
+ if (xfs_inode_clean(ip))
+ goto reclaim;
+
+ /* Now we have an inode that needs flushing */
+ error = xfs_iflush(ip, sync_mode);
+ if (sync_mode & SYNC_WAIT) {
+ xfs_iflock(ip);
+ goto reclaim;
+ }
+
+ /*
+ * When we have to flush an inode but don't have SYNC_WAIT set, we
+ * flush the inode out using a delwri buffer and wait for the next
+ * call into reclaim to find it in a clean state instead of waiting for
+ * it now. We also don't return errors here - if the error is transient
+ * then the next reclaim pass will flush the inode, and if the error
+ * is permanent then the next sync reclaim will reclaim the inode and
+ * pass on the error.
+ */
+ if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
+ xfs_fs_cmn_err(CE_WARN, ip->i_mount,
+ "inode 0x%llx background reclaim flush failed with %d",
+ (long long)ip->i_ino, error);
+ }
+out:
+ xfs_iflags_clear(ip, XFS_IRECLAIM);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ /*
+ * We could return EAGAIN here to make reclaim rescan the inode tree in
+ * a short while. However, this just burns CPU time scanning the tree
+ * waiting for IO to complete and xfssyncd never goes back to the idle
+ * state. Instead, return 0 to let the next scheduled background reclaim
+ * attempt to reclaim the inode again.
+ */
+ return 0;
+
+reclaim:
+ xfs_ifunlock(ip);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_ireclaim(ip);
+ return error;
- return xfs_reclaim_inode(ip, flags);
}
int
@@ -780,6 +853,6 @@ xfs_reclaim_inodes(
xfs_mount_t *mp,
int mode)
{
- return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode,
- XFS_ICI_RECLAIM_TAG);
+ return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
+ XFS_ICI_RECLAIM_TAG, 1);
}
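The rewritten xfs_reclaim_inode() above keys its behaviour off SYNC_WAIT: without it, pinned or flush-locked inodes are requeued for a later background pass; with it, the caller is expected to have pushed delayed-write buffers out first so the wait on the flush lock stays short. The two kinds of call site in this patch reflect that:

	/* xfs_sync_worker(): periodic, non-blocking background reclaim */
	xfs_reclaim_inodes(mp, 0);

	/* quiesce path: push delwri buffers, then reclaim synchronously */
	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, SYNC_WAIT);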
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index a500b4d91835..d480c346cabb 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -37,7 +37,6 @@ void xfs_syncd_stop(struct xfs_mount *mp);
int xfs_sync_attr(struct xfs_mount *mp, int flags);
int xfs_sync_data(struct xfs_mount *mp, int flags);
-int xfs_sync_fsdata(struct xfs_mount *mp, int flags);
int xfs_quiesce_data(struct xfs_mount *mp);
void xfs_quiesce_attr(struct xfs_mount *mp);
@@ -54,6 +53,6 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
- int flags, int tag);
+ int flags, int tag, int write_lock);
#endif
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index c22a608321a3..a4574dcf5065 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -78,6 +78,33 @@ DECLARE_EVENT_CLASS(xfs_attr_list_class,
)
)
+#define DEFINE_PERAG_REF_EVENT(name) \
+TRACE_EVENT(name, \
+ TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, int refcount, \
+ unsigned long caller_ip), \
+ TP_ARGS(mp, agno, refcount, caller_ip), \
+ TP_STRUCT__entry( \
+ __field(dev_t, dev) \
+ __field(xfs_agnumber_t, agno) \
+ __field(int, refcount) \
+ __field(unsigned long, caller_ip) \
+ ), \
+ TP_fast_assign( \
+ __entry->dev = mp->m_super->s_dev; \
+ __entry->agno = agno; \
+ __entry->refcount = refcount; \
+ __entry->caller_ip = caller_ip; \
+ ), \
+ TP_printk("dev %d:%d agno %u refcount %d caller %pf", \
+ MAJOR(__entry->dev), MINOR(__entry->dev), \
+ __entry->agno, \
+ __entry->refcount, \
+ (char *)__entry->caller_ip) \
+);
+
+DEFINE_PERAG_REF_EVENT(xfs_perag_get)
+DEFINE_PERAG_REF_EVENT(xfs_perag_put)
+
#define DEFINE_ATTR_LIST_EVENT(name) \
DEFINE_EVENT(xfs_attr_list_class, name, \
TP_PROTO(struct xfs_attr_list_context *ctx), \
@@ -456,6 +483,7 @@ DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_unlock_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_committed);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_push);
+DEFINE_BUF_ITEM_EVENT(xfs_buf_item_pushbuf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf);
DEFINE_BUF_ITEM_EVENT(xfs_trans_get_buf_recur);
DEFINE_BUF_ITEM_EVENT(xfs_trans_getsb);
@@ -1414,6 +1442,59 @@ TRACE_EVENT(xfs_dir2_leafn_moveents,
__entry->count)
);
+#define XFS_SWAPEXT_INODES \
+ { 0, "target" }, \
+ { 1, "temp" }
+
+#define XFS_INODE_FORMAT_STR \
+ { 0, "invalid" }, \
+ { 1, "local" }, \
+ { 2, "extent" }, \
+ { 3, "btree" }
+
+DECLARE_EVENT_CLASS(xfs_swap_extent_class,
+ TP_PROTO(struct xfs_inode *ip, int which),
+ TP_ARGS(ip, which),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(int, which)
+ __field(xfs_ino_t, ino)
+ __field(int, format)
+ __field(int, nex)
+ __field(int, max_nex)
+ __field(int, broot_size)
+ __field(int, fork_off)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->which = which;
+ __entry->ino = ip->i_ino;
+ __entry->format = ip->i_d.di_format;
+ __entry->nex = ip->i_d.di_nextents;
+ __entry->max_nex = ip->i_df.if_ext_max;
+ __entry->broot_size = ip->i_df.if_broot_bytes;
+ __entry->fork_off = XFS_IFORK_BOFF(ip);
+ ),
+ TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
+ "Max in-fork extents %d, broot size %d, fork offset %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
+ __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
+ __entry->nex,
+ __entry->max_nex,
+ __entry->broot_size,
+ __entry->fork_off)
+)
+
+#define DEFINE_SWAPEXT_EVENT(name) \
+DEFINE_EVENT(xfs_swap_extent_class, name, \
+ TP_PROTO(struct xfs_inode *ip, int which), \
+ TP_ARGS(ip, which))
+
+DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
+DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
+
#endif /* _TRACE_XFS_H */
#undef TRACE_INCLUDE_PATH
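The DEFINE_PERAG_REF_EVENT tracepoints added above take the mount, the AG number, the new reference count and the caller address. A hedged sketch of how a reference helper would emit them; only the trace call signature comes from this patch, while the per-AG radix tree and pag_ref counter are assumptions about code outside this excerpt:

	struct xfs_perag *pag;
	int ref;

	pag = radix_tree_lookup(&mp->m_perag_tree, agno);	/* assumed lookup */
	ref = atomic_inc_return(&pag->pag_ref);			/* assumed refcount */
	trace_xfs_perag_get(mp, agno, ref, _RET_IP_);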
diff --git a/fs/xfs/linux-2.6/xfs_xattr.c b/fs/xfs/linux-2.6/xfs_xattr.c
index 0b1878857fc3..fa01b9daba6b 100644
--- a/fs/xfs/linux-2.6/xfs_xattr.c
+++ b/fs/xfs/linux-2.6/xfs_xattr.c
@@ -45,7 +45,7 @@ xfs_xattr_get(struct dentry *dentry, const char *name,
value = NULL;
}
- error = -xfs_attr_get(ip, name, value, &asize, xflags);
+ error = -xfs_attr_get(ip, (unsigned char *)name, value, &asize, xflags);
if (error)
return error;
return asize;
@@ -67,8 +67,9 @@ xfs_xattr_set(struct dentry *dentry, const char *name, const void *value,
xflags |= ATTR_REPLACE;
if (!value)
- return -xfs_attr_remove(ip, name, xflags);
- return -xfs_attr_set(ip, name, (void *)value, size, xflags);
+ return -xfs_attr_remove(ip, (unsigned char *)name, xflags);
+ return -xfs_attr_set(ip, (unsigned char *)name,
+ (void *)value, size, xflags);
}
static struct xattr_handler xfs_xattr_user_handler = {
@@ -124,8 +125,13 @@ static const char *xfs_xattr_prefix(int flags)
}
static int
-xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags,
- char *name, int namelen, int valuelen, char *value)
+xfs_xattr_put_listent(
+ struct xfs_attr_list_context *context,
+ int flags,
+ unsigned char *name,
+ int namelen,
+ int valuelen,
+ unsigned char *value)
{
unsigned int prefix_len = xfs_xattr_prefix_len(flags);
char *offset;
@@ -148,7 +154,7 @@ xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags,
offset = (char *)context->alist + context->count;
strncpy(offset, xfs_xattr_prefix(flags), prefix_len);
offset += prefix_len;
- strncpy(offset, name, namelen); /* real name */
+ strncpy(offset, (char *)name, namelen); /* real name */
offset += namelen;
*offset = '\0';
context->count += prefix_len + namelen + 1;
@@ -156,8 +162,13 @@ xfs_xattr_put_listent(struct xfs_attr_list_context *context, int flags,
}
static int
-xfs_xattr_put_listent_sizes(struct xfs_attr_list_context *context, int flags,
- char *name, int namelen, int valuelen, char *value)
+xfs_xattr_put_listent_sizes(
+ struct xfs_attr_list_context *context,
+ int flags,
+ unsigned char *name,
+ int namelen,
+ int valuelen,
+ unsigned char *value)
{
context->count += xfs_xattr_prefix_len(flags) + namelen + 1;
return 0;
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index d7c7eea09fc2..5f79dd78626b 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -1187,7 +1187,7 @@ xfs_qm_dqflush(
* block, nada.
*/
if (!XFS_DQ_IS_DIRTY(dqp) ||
- (!(flags & XFS_QMOPT_SYNC) && atomic_read(&dqp->q_pincount) > 0)) {
+ (!(flags & SYNC_WAIT) && atomic_read(&dqp->q_pincount) > 0)) {
xfs_dqfunlock(dqp);
return 0;
}
@@ -1248,23 +1248,20 @@ xfs_qm_dqflush(
*/
if (XFS_BUF_ISPINNED(bp)) {
trace_xfs_dqflush_force(dqp);
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+ xfs_log_force(mp, 0);
}
- if (flags & XFS_QMOPT_DELWRI) {
- xfs_bdwrite(mp, bp);
- } else if (flags & XFS_QMOPT_ASYNC) {
- error = xfs_bawrite(mp, bp);
- } else {
+ if (flags & SYNC_WAIT)
error = xfs_bwrite(mp, bp);
- }
+ else
+ xfs_bdwrite(mp, bp);
trace_xfs_dqflush_done(dqp);
/*
* dqp is still locked, but caller is free to unlock it now.
*/
- return (error);
+ return error;
}
@@ -1445,7 +1442,7 @@ xfs_qm_dqpurge(
* We don't care about getting disk errors here. We need
* to purge this dquot anyway, so we go ahead regardless.
*/
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_SYNC);
+ error = xfs_qm_dqflush(dqp, SYNC_WAIT);
if (error)
xfs_fs_cmn_err(CE_WARN, mp,
"xfs_qm_dqpurge: dquot %p flush failed", dqp);
@@ -1529,25 +1526,17 @@ xfs_qm_dqflock_pushbuf_wait(
* the flush lock when the I/O completes.
*/
bp = xfs_incore(dqp->q_mount->m_ddev_targp, dqp->q_blkno,
- XFS_QI_DQCHUNKLEN(dqp->q_mount),
- XFS_INCORE_TRYLOCK);
- if (bp != NULL) {
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- int error;
- if (XFS_BUF_ISPINNED(bp)) {
- xfs_log_force(dqp->q_mount,
- (xfs_lsn_t)0,
- XFS_LOG_FORCE);
- }
- error = xfs_bawrite(dqp->q_mount, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
- "xfs_qm_dqflock_pushbuf_wait: "
- "pushbuf error %d on dqp %p, bp %p",
- error, dqp, bp);
- } else {
- xfs_buf_relse(bp);
- }
+ XFS_QI_DQCHUNKLEN(dqp->q_mount), XBF_TRYLOCK);
+ if (!bp)
+ goto out_lock;
+
+ if (XFS_BUF_ISDELAYWRITE(bp)) {
+ if (XFS_BUF_ISPINNED(bp))
+ xfs_log_force(dqp->q_mount, 0);
+ xfs_buf_delwri_promote(bp);
+ wake_up_process(bp->b_target->bt_task);
}
+ xfs_buf_relse(bp);
+out_lock:
xfs_dqflock(dqp);
}
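With the XFS_QMOPT_{SYNC,ASYNC,DELWRI} flush flags gone, xfs_qm_dqflush() callers now choose the behaviour with the generic sync flags, as the purge and log-item paths in this patch show:

	/* synchronous flush, e.g. the dquot purge path */
	error = xfs_qm_dqflush(dqp, SYNC_WAIT);

	/* delayed-write flush, e.g. the AIL push path */
	error = xfs_qm_dqflush(dqp, 0);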
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index d0d4a9a0bbd7..4e4ee9a57194 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -74,11 +74,11 @@ xfs_qm_dquot_logitem_format(
logvec->i_addr = (xfs_caddr_t)&logitem->qli_format;
logvec->i_len = sizeof(xfs_dq_logformat_t);
- XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_QFORMAT);
+ logvec->i_type = XLOG_REG_TYPE_QFORMAT;
logvec++;
logvec->i_addr = (xfs_caddr_t)&logitem->qli_dquot->q_core;
logvec->i_len = sizeof(xfs_disk_dquot_t);
- XLOG_VEC_SET_TYPE(logvec, XLOG_REG_TYPE_DQUOT);
+ logvec->i_type = XLOG_REG_TYPE_DQUOT;
ASSERT(2 == logitem->qli_item.li_desc->lid_size);
logitem->qli_format.qlf_size = 2;
@@ -153,7 +153,7 @@ xfs_qm_dquot_logitem_push(
* lock without sleeping, then there must not have been
* anyone in the process of flushing the dquot.
*/
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+ error = xfs_qm_dqflush(dqp, 0);
if (error)
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
"xfs_qm_dquot_logitem_push: push error %d on dqp %p",
@@ -190,7 +190,7 @@ xfs_qm_dqunpin_wait(
/*
* Give the log a push so we don't wait here too long.
*/
- xfs_log_force(dqp->q_mount, (xfs_lsn_t)0, XFS_LOG_FORCE);
+ xfs_log_force(dqp->q_mount, 0);
wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}
@@ -212,68 +212,31 @@ xfs_qm_dquot_logitem_pushbuf(
xfs_dquot_t *dqp;
xfs_mount_t *mp;
xfs_buf_t *bp;
- uint dopush;
dqp = qip->qli_dquot;
ASSERT(XFS_DQ_IS_LOCKED(dqp));
/*
- * The qli_pushbuf_flag keeps others from
- * trying to duplicate our effort.
- */
- ASSERT(qip->qli_pushbuf_flag != 0);
- ASSERT(qip->qli_push_owner == current_pid());
-
- /*
* If flushlock isn't locked anymore, chances are that the
* inode flush completed and the inode was taken off the AIL.
* So, just get out.
*/
if (completion_done(&dqp->q_flush) ||
((qip->qli_item.li_flags & XFS_LI_IN_AIL) == 0)) {
- qip->qli_pushbuf_flag = 0;
xfs_dqunlock(dqp);
return;
}
mp = dqp->q_mount;
bp = xfs_incore(mp->m_ddev_targp, qip->qli_format.qlf_blkno,
- XFS_QI_DQCHUNKLEN(mp),
- XFS_INCORE_TRYLOCK);
- if (bp != NULL) {
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- dopush = ((qip->qli_item.li_flags & XFS_LI_IN_AIL) &&
- !completion_done(&dqp->q_flush));
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
-
- if (XFS_BUF_ISPINNED(bp)) {
- xfs_log_force(mp, (xfs_lsn_t)0,
- XFS_LOG_FORCE);
- }
- if (dopush) {
- int error;
-#ifdef XFSRACEDEBUG
- delay_for_intr();
- delay(300);
-#endif
- error = xfs_bawrite(mp, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_qm_dquot_logitem_pushbuf: pushbuf error %d on qip %p, bp %p",
- error, qip, bp);
- } else {
- xfs_buf_relse(bp);
- }
- } else {
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
- xfs_buf_relse(bp);
- }
+ XFS_QI_DQCHUNKLEN(mp), XBF_TRYLOCK);
+ xfs_dqunlock(dqp);
+ if (!bp)
return;
- }
+ if (XFS_BUF_ISDELAYWRITE(bp))
+ xfs_buf_delwri_promote(bp);
+ xfs_buf_relse(bp);
+ return;
- qip->qli_pushbuf_flag = 0;
- xfs_dqunlock(dqp);
}
/*
@@ -291,50 +254,24 @@ xfs_qm_dquot_logitem_trylock(
xfs_dq_logitem_t *qip)
{
xfs_dquot_t *dqp;
- uint retval;
dqp = qip->qli_dquot;
if (atomic_read(&dqp->q_pincount) > 0)
- return (XFS_ITEM_PINNED);
+ return XFS_ITEM_PINNED;
if (! xfs_qm_dqlock_nowait(dqp))
- return (XFS_ITEM_LOCKED);
+ return XFS_ITEM_LOCKED;
- retval = XFS_ITEM_SUCCESS;
if (!xfs_dqflock_nowait(dqp)) {
/*
- * The dquot is already being flushed. It may have been
- * flushed delayed write, however, and we don't want to
- * get stuck waiting for that to complete. So, we want to check
- * to see if we can lock the dquot's buffer without sleeping.
- * If we can and it is marked for delayed write, then we
- * hold it and send it out from the push routine. We don't
- * want to do that now since we might sleep in the device
- * strategy routine. We also don't want to grab the buffer lock
- * here because we'd like not to call into the buffer cache
- * while holding the AIL lock.
- * Make sure to only return PUSHBUF if we set pushbuf_flag
- * ourselves. If someone else is doing it then we don't
- * want to go to the push routine and duplicate their efforts.
+ * dquot has already been flushed to the backing buffer,
+ * leave it locked, pushbuf routine will unlock it.
*/
- if (qip->qli_pushbuf_flag == 0) {
- qip->qli_pushbuf_flag = 1;
- ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
-#ifdef DEBUG
- qip->qli_push_owner = current_pid();
-#endif
- /*
- * The dquot is left locked.
- */
- retval = XFS_ITEM_PUSHBUF;
- } else {
- retval = XFS_ITEM_FLUSHING;
- xfs_dqunlock_nonotify(dqp);
- }
+ return XFS_ITEM_PUSHBUF;
}
ASSERT(qip->qli_item.li_flags & XFS_LI_IN_AIL);
- return (retval);
+ return XFS_ITEM_SUCCESS;
}
@@ -467,7 +404,7 @@ xfs_qm_qoff_logitem_format(xfs_qoff_logitem_t *qf,
log_vector->i_addr = (xfs_caddr_t)&(qf->qql_format);
log_vector->i_len = sizeof(xfs_qoff_logitem_t);
- XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_QUOTAOFF);
+ log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
qf->qql_format.qf_size = 1;
}
diff --git a/fs/xfs/quota/xfs_dquot_item.h b/fs/xfs/quota/xfs_dquot_item.h
index 5a632531f843..5acae2ada70b 100644
--- a/fs/xfs/quota/xfs_dquot_item.h
+++ b/fs/xfs/quota/xfs_dquot_item.h
@@ -27,10 +27,6 @@ typedef struct xfs_dq_logitem {
xfs_log_item_t qli_item; /* common portion */
struct xfs_dquot *qli_dquot; /* dquot ptr */
xfs_lsn_t qli_flush_lsn; /* lsn at last flush */
- unsigned short qli_pushbuf_flag; /* 1 bit used in push_ail */
-#ifdef DEBUG
- uint64_t qli_push_owner;
-#endif
xfs_dq_logformat_t qli_format; /* logged structure */
} xfs_dq_logitem_t;
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 9e627a8b5b0e..417e61e3d9dd 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -118,9 +118,14 @@ xfs_Gqm_init(void)
*/
udqhash = kmem_zalloc_greedy(&hsize,
XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
- XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
- KM_SLEEP | KM_MAYFAIL | KM_LARGE);
- gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
+ XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
+ if (!udqhash)
+ goto out;
+
+ gdqhash = kmem_zalloc_large(hsize);
+ if (!gdqhash)
+ goto out_free_udqhash;
+
hsize /= sizeof(xfs_dqhash_t);
ndquot = hsize << 8;
@@ -170,6 +175,11 @@ xfs_Gqm_init(void)
mutex_init(&qcheck_lock);
#endif
return xqm;
+
+ out_free_udqhash:
+ kmem_free_large(udqhash);
+ out:
+ return NULL;
}
/*
@@ -189,8 +199,8 @@ xfs_qm_destroy(
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
}
- kmem_free(xqm->qm_usr_dqhtable);
- kmem_free(xqm->qm_grp_dqhtable);
+ kmem_free_large(xqm->qm_usr_dqhtable);
+ kmem_free_large(xqm->qm_grp_dqhtable);
xqm->qm_usr_dqhtable = NULL;
xqm->qm_grp_dqhtable = NULL;
xqm->qm_dqhashmask = 0;
@@ -219,8 +229,12 @@ xfs_qm_hold_quotafs_ref(
*/
mutex_lock(&xfs_Gqm_lock);
- if (xfs_Gqm == NULL)
+ if (!xfs_Gqm) {
xfs_Gqm = xfs_Gqm_init();
+ if (!xfs_Gqm)
+ return ENOMEM;
+ }
+
/*
* We can keep a list of all filesystems with quotas mounted for
* debugging and statistical purposes, but ...
@@ -436,7 +450,7 @@ xfs_qm_unmount_quotas(
STATIC int
xfs_qm_dqflush_all(
xfs_mount_t *mp,
- int flags)
+ int sync_mode)
{
int recl;
xfs_dquot_t *dqp;
@@ -472,7 +486,7 @@ again:
* across a disk write.
*/
xfs_qm_mplist_unlock(mp);
- error = xfs_qm_dqflush(dqp, flags);
+ error = xfs_qm_dqflush(dqp, sync_mode);
xfs_dqunlock(dqp);
if (error)
return error;
@@ -912,13 +926,11 @@ xfs_qm_sync(
{
int recl, restarts;
xfs_dquot_t *dqp;
- uint flush_flags;
int error;
if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
return 0;
- flush_flags = (flags & SYNC_WAIT) ? XFS_QMOPT_SYNC : XFS_QMOPT_DELWRI;
restarts = 0;
again:
@@ -978,7 +990,7 @@ xfs_qm_sync(
* across a disk write
*/
xfs_qm_mplist_unlock(mp);
- error = xfs_qm_dqflush(dqp, flush_flags);
+ error = xfs_qm_dqflush(dqp, flags);
xfs_dqunlock(dqp);
if (error && XFS_FORCED_SHUTDOWN(mp))
return 0; /* Need to prevent umount failure */
@@ -1782,7 +1794,7 @@ xfs_qm_quotacheck(
* successfully.
*/
if (!error)
- error = xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
+ error = xfs_qm_dqflush_all(mp, 0);
/*
* We can get this error if we couldn't do a dquot allocation inside
@@ -2004,7 +2016,7 @@ xfs_qm_shake_freelist(
* We flush it delayed write, so don't bother
* releasing the mplock.
*/
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+ error = xfs_qm_dqflush(dqp, 0);
if (error) {
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
"xfs_qm_dqflush_all: dquot %p flush failed", dqp);
@@ -2187,7 +2199,7 @@ xfs_qm_dqreclaim_one(void)
* We flush it delayed write, so don't bother
* releasing the freelist lock.
*/
- error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+ error = xfs_qm_dqflush(dqp, 0);
if (error) {
xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
"xfs_qm_dqreclaim: dquot %p flush failed", dqp);
diff --git a/fs/xfs/quota/xfs_qm_bhv.c b/fs/xfs/quota/xfs_qm_bhv.c
index a5346630dfae..97b410c12794 100644
--- a/fs/xfs/quota/xfs_qm_bhv.c
+++ b/fs/xfs/quota/xfs_qm_bhv.c
@@ -59,7 +59,7 @@ xfs_fill_statvfs_from_dquot(
be64_to_cpu(dp->d_blk_hardlimit);
if (limit && statp->f_blocks > limit) {
statp->f_blocks = limit;
- statp->f_bfree =
+ statp->f_bfree = statp->f_bavail =
(statp->f_blocks > be64_to_cpu(dp->d_bcount)) ?
(statp->f_blocks - be64_to_cpu(dp->d_bcount)) : 0;
}
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 71af76fe8a23..5d0ee8d492db 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -891,7 +891,7 @@ xfs_qm_dqrele_all_inodes(
uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG);
+ xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0);
}
/*------------------------------------------------------------------------*/
@@ -1192,9 +1192,9 @@ xfs_qm_internalqcheck(
if (! XFS_IS_QUOTA_ON(mp))
return XFS_ERROR(ESRCH);
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+ xfs_log_force(mp, XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+ xfs_log_force(mp, XFS_LOG_SYNC);
XFS_bflush(mp->m_ddev_targp);
mutex_lock(&qcheck_lock);
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 97ac9640be98..c3ab75cb1d9a 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -589,12 +589,18 @@ xfs_trans_unreserve_and_mod_dquots(
}
}
-STATIC int
-xfs_quota_error(uint flags)
+STATIC void
+xfs_quota_warn(
+ struct xfs_mount *mp,
+ struct xfs_dquot *dqp,
+ int type)
{
- if (flags & XFS_QMOPT_ENOSPC)
- return ENOSPC;
- return EDQUOT;
+ /* no warnings for project quotas - we just return ENOSPC later */
+ if (dqp->dq_flags & XFS_DQ_PROJ)
+ return;
+ quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA,
+ be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev,
+ type);
}
/*
@@ -612,7 +618,6 @@ xfs_trans_dqresv(
long ninos,
uint flags)
{
- int error;
xfs_qcnt_t hardlimit;
xfs_qcnt_t softlimit;
time_t timer;
@@ -649,7 +654,6 @@ xfs_trans_dqresv(
warnlimit = XFS_QI_RTBWARNLIMIT(dqp->q_mount);
resbcountp = &dqp->q_res_rtbcount;
}
- error = 0;
if ((flags & XFS_QMOPT_FORCE_RES) == 0 &&
dqp->q_core.d_id &&
@@ -667,18 +671,20 @@ xfs_trans_dqresv(
* nblks.
*/
if (hardlimit > 0ULL &&
- (hardlimit <= nblks + *resbcountp)) {
- error = xfs_quota_error(flags);
+ hardlimit <= nblks + *resbcountp) {
+ xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN);
goto error_return;
}
-
if (softlimit > 0ULL &&
- (softlimit <= nblks + *resbcountp)) {
+ softlimit <= nblks + *resbcountp) {
if ((timer != 0 && get_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
- error = xfs_quota_error(flags);
+ xfs_quota_warn(mp, dqp,
+ QUOTA_NL_BSOFTLONGWARN);
goto error_return;
}
+
+ xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN);
}
}
if (ninos > 0) {
@@ -692,15 +698,19 @@ xfs_trans_dqresv(
softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
if (!softlimit)
softlimit = q->qi_isoftlimit;
+
if (hardlimit > 0ULL && count >= hardlimit) {
- error = xfs_quota_error(flags);
+ xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN);
goto error_return;
- } else if (softlimit > 0ULL && count >= softlimit) {
- if ((timer != 0 && get_seconds() > timer) ||
+ }
+ if (softlimit > 0ULL && count >= softlimit) {
+ if ((timer != 0 && get_seconds() > timer) ||
(warns != 0 && warns >= warnlimit)) {
- error = xfs_quota_error(flags);
+ xfs_quota_warn(mp, dqp,
+ QUOTA_NL_ISOFTLONGWARN);
goto error_return;
}
+ xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN);
}
}
}
@@ -736,9 +746,14 @@ xfs_trans_dqresv(
ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount));
ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount));
+ xfs_dqunlock(dqp);
+ return 0;
+
error_return:
xfs_dqunlock(dqp);
- return error;
+ if (flags & XFS_QMOPT_ENOSPC)
+ return ENOSPC;
+ return EDQUOT;
}
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 00fd357c3e46..d13eeba2c8f8 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -36,8 +36,8 @@ struct xfs_acl {
};
/* On-disk XFS extended attribute names */
-#define SGI_ACL_FILE "SGI_ACL_FILE"
-#define SGI_ACL_DEFAULT "SGI_ACL_DEFAULT"
+#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE"
+#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT"
#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index 6702bd865811..b1a5a1ff88ea 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -187,17 +187,13 @@ typedef struct xfs_perag_busy {
/*
* Per-ag incore structure, copies of information in agf and agi,
* to improve the performance of allocation group selection.
- *
- * pick sizes which fit in allocation buckets well
*/
-#if (BITS_PER_LONG == 32)
-#define XFS_PAGB_NUM_SLOTS 84
-#elif (BITS_PER_LONG == 64)
#define XFS_PAGB_NUM_SLOTS 128
-#endif
-typedef struct xfs_perag
-{
+typedef struct xfs_perag {
+ struct xfs_mount *pag_mount; /* owner filesystem */
+ xfs_agnumber_t pag_agno; /* AG this structure belongs to */
+ atomic_t pag_ref; /* perag reference count */
char pagf_init; /* this agf's entry is initialized */
char pagi_init; /* this agi's entry is initialized */
char pagf_metadata; /* the agf is preferred to be metadata */
@@ -210,8 +206,6 @@ typedef struct xfs_perag
__uint32_t pagf_btreeblks; /* # of blocks held in AGF btrees */
xfs_agino_t pagi_freecount; /* number of free inodes */
xfs_agino_t pagi_count; /* number of allocated inodes */
- int pagb_count; /* pagb slots in use */
- xfs_perag_busy_t *pagb_list; /* unstable blocks */
/*
* Inode allocation search lookup optimisation.
@@ -230,6 +224,8 @@ typedef struct xfs_perag
rwlock_t pag_ici_lock; /* incore inode lock */
struct radix_tree_root pag_ici_root; /* incore inode cache root */
#endif
+ int pagb_count; /* pagb slots in use */
+ xfs_perag_busy_t pagb_list[XFS_PAGB_NUM_SLOTS]; /* unstable blocks */
} xfs_perag_t;
/*
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 275b1f4f9430..94cddbfb2560 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1662,11 +1662,13 @@ xfs_free_ag_extent(
xfs_agf_t *agf;
xfs_perag_t *pag; /* per allocation group data */
+ pag = xfs_perag_get(mp, agno);
+ pag->pagf_freeblks += len;
+ xfs_perag_put(pag);
+
agf = XFS_BUF_TO_AGF(agbp);
- pag = &mp->m_perag[agno];
be32_add_cpu(&agf->agf_freeblks, len);
xfs_trans_agblocks_delta(tp, len);
- pag->pagf_freeblks += len;
XFS_WANT_CORRUPTED_GOTO(
be32_to_cpu(agf->agf_freeblks) <=
be32_to_cpu(agf->agf_length),
@@ -1969,10 +1971,12 @@ xfs_alloc_get_freelist(
xfs_trans_brelse(tp, agflbp);
if (be32_to_cpu(agf->agf_flfirst) == XFS_AGFL_SIZE(mp))
agf->agf_flfirst = 0;
- pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
+
+ pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, -1);
xfs_trans_agflist_delta(tp, -1);
pag->pagf_flcount--;
+ xfs_perag_put(pag);
logflags = XFS_AGF_FLFIRST | XFS_AGF_FLCOUNT;
if (btreeblk) {
@@ -2078,7 +2082,8 @@ xfs_alloc_put_freelist(
be32_add_cpu(&agf->agf_fllast, 1);
if (be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp))
agf->agf_fllast = 0;
- pag = &mp->m_perag[be32_to_cpu(agf->agf_seqno)];
+
+ pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno));
be32_add_cpu(&agf->agf_flcount, 1);
xfs_trans_agflist_delta(tp, 1);
pag->pagf_flcount++;
@@ -2089,6 +2094,7 @@ xfs_alloc_put_freelist(
pag->pagf_btreeblks--;
logflags |= XFS_AGF_BTREEBLKS;
}
+ xfs_perag_put(pag);
xfs_alloc_log_agf(tp, agbp, logflags);
@@ -2152,7 +2158,6 @@ xfs_read_agf(
xfs_trans_brelse(tp, *bpp);
return XFS_ERROR(EFSCORRUPTED);
}
-
XFS_BUF_SET_VTYPE_REF(*bpp, B_FS_AGF, XFS_AGF_REF);
return 0;
}
@@ -2175,7 +2180,7 @@ xfs_alloc_read_agf(
ASSERT(agno != NULLAGNUMBER);
error = xfs_read_agf(mp, tp, agno,
- (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XFS_BUF_TRYLOCK : 0,
+ (flags & XFS_ALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
bpp);
if (error)
return error;
@@ -2184,7 +2189,7 @@ xfs_alloc_read_agf(
ASSERT(!XFS_BUF_GETERROR(*bpp));
agf = XFS_BUF_TO_AGF(*bpp);
- pag = &mp->m_perag[agno];
+ pag = xfs_perag_get(mp, agno);
if (!pag->pagf_init) {
pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks);
pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks);
@@ -2195,8 +2200,8 @@ xfs_alloc_read_agf(
pag->pagf_levels[XFS_BTNUM_CNTi] =
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]);
spin_lock_init(&pag->pagb_lock);
- pag->pagb_list = kmem_zalloc(XFS_PAGB_NUM_SLOTS *
- sizeof(xfs_perag_busy_t), KM_SLEEP);
+ pag->pagb_count = 0;
+ memset(pag->pagb_list, 0, sizeof(pag->pagb_list));
pag->pagf_init = 1;
}
#ifdef DEBUG
@@ -2211,6 +2216,7 @@ xfs_alloc_read_agf(
be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi]));
}
#endif
+ xfs_perag_put(pag);
return 0;
}
@@ -2270,8 +2276,7 @@ xfs_alloc_vextent(
* These three force us into a single a.g.
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
- down_read(&mp->m_peraglock);
- args->pag = &mp->m_perag[args->agno];
+ args->pag = xfs_perag_get(mp, args->agno);
args->minleft = 0;
error = xfs_alloc_fix_freelist(args, 0);
args->minleft = minleft;
@@ -2280,14 +2285,12 @@ xfs_alloc_vextent(
goto error0;
}
if (!args->agbp) {
- up_read(&mp->m_peraglock);
trace_xfs_alloc_vextent_noagbp(args);
break;
}
args->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
if ((error = xfs_alloc_ag_vextent(args)))
goto error0;
- up_read(&mp->m_peraglock);
break;
case XFS_ALLOCTYPE_START_BNO:
/*
@@ -2339,9 +2342,8 @@ xfs_alloc_vextent(
* Loop over allocation groups twice; first time with
* trylock set, second time without.
*/
- down_read(&mp->m_peraglock);
for (;;) {
- args->pag = &mp->m_perag[args->agno];
+ args->pag = xfs_perag_get(mp, args->agno);
if (no_min) args->minleft = 0;
error = xfs_alloc_fix_freelist(args, flags);
args->minleft = minleft;
@@ -2400,8 +2402,8 @@ xfs_alloc_vextent(
}
}
}
+ xfs_perag_put(args->pag);
}
- up_read(&mp->m_peraglock);
if (bump_rotor || (type == XFS_ALLOCTYPE_ANY_AG)) {
if (args->agno == sagno)
mp->m_agfrotor = (mp->m_agfrotor + 1) %
@@ -2427,9 +2429,10 @@ xfs_alloc_vextent(
args->len);
#endif
}
+ xfs_perag_put(args->pag);
return 0;
error0:
- up_read(&mp->m_peraglock);
+ xfs_perag_put(args->pag);
return error;
}
@@ -2454,8 +2457,7 @@ xfs_free_extent(
args.agno = XFS_FSB_TO_AGNO(args.mp, bno);
ASSERT(args.agno < args.mp->m_sb.sb_agcount);
args.agbno = XFS_FSB_TO_AGBNO(args.mp, bno);
- down_read(&args.mp->m_peraglock);
- args.pag = &args.mp->m_perag[args.agno];
+ args.pag = xfs_perag_get(args.mp, args.agno);
if ((error = xfs_alloc_fix_freelist(&args, XFS_ALLOC_FLAG_FREEING)))
goto error0;
#ifdef DEBUG
@@ -2465,7 +2467,7 @@ xfs_free_extent(
#endif
error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0);
error0:
- up_read(&args.mp->m_peraglock);
+ xfs_perag_put(args.pag);
return error;
}
@@ -2486,15 +2488,15 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
xfs_agblock_t bno,
xfs_extlen_t len)
{
- xfs_mount_t *mp;
xfs_perag_busy_t *bsy;
+ struct xfs_perag *pag;
int n;
- mp = tp->t_mountp;
- spin_lock(&mp->m_perag[agno].pagb_lock);
+ pag = xfs_perag_get(tp->t_mountp, agno);
+ spin_lock(&pag->pagb_lock);
/* search pagb_list for an open slot */
- for (bsy = mp->m_perag[agno].pagb_list, n = 0;
+ for (bsy = pag->pagb_list, n = 0;
n < XFS_PAGB_NUM_SLOTS;
bsy++, n++) {
if (bsy->busy_tp == NULL) {
@@ -2502,11 +2504,11 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
}
}
- trace_xfs_alloc_busy(mp, agno, bno, len, n);
+ trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len, n);
if (n < XFS_PAGB_NUM_SLOTS) {
- bsy = &mp->m_perag[agno].pagb_list[n];
- mp->m_perag[agno].pagb_count++;
+ bsy = &pag->pagb_list[n];
+ pag->pagb_count++;
bsy->busy_start = bno;
bsy->busy_length = len;
bsy->busy_tp = tp;
@@ -2521,7 +2523,8 @@ xfs_alloc_mark_busy(xfs_trans_t *tp,
xfs_trans_set_sync(tp);
}
- spin_unlock(&mp->m_perag[agno].pagb_lock);
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
}
void
@@ -2529,24 +2532,23 @@ xfs_alloc_clear_busy(xfs_trans_t *tp,
xfs_agnumber_t agno,
int idx)
{
- xfs_mount_t *mp;
+ struct xfs_perag *pag;
xfs_perag_busy_t *list;
- mp = tp->t_mountp;
-
- spin_lock(&mp->m_perag[agno].pagb_lock);
- list = mp->m_perag[agno].pagb_list;
-
ASSERT(idx < XFS_PAGB_NUM_SLOTS);
+ pag = xfs_perag_get(tp->t_mountp, agno);
+ spin_lock(&pag->pagb_lock);
+ list = pag->pagb_list;
- trace_xfs_alloc_unbusy(mp, agno, idx, list[idx].busy_tp == tp);
+ trace_xfs_alloc_unbusy(tp->t_mountp, agno, idx, list[idx].busy_tp == tp);
if (list[idx].busy_tp == tp) {
list[idx].busy_tp = NULL;
- mp->m_perag[agno].pagb_count--;
+ pag->pagb_count--;
}
- spin_unlock(&mp->m_perag[agno].pagb_lock);
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
}
@@ -2560,17 +2562,15 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_agblock_t bno,
xfs_extlen_t len)
{
- xfs_mount_t *mp;
+ struct xfs_perag *pag;
xfs_perag_busy_t *bsy;
xfs_agblock_t uend, bend;
xfs_lsn_t lsn = 0;
int cnt;
- mp = tp->t_mountp;
-
- spin_lock(&mp->m_perag[agno].pagb_lock);
-
- uend = bno + len - 1;
+ pag = xfs_perag_get(tp->t_mountp, agno);
+ spin_lock(&pag->pagb_lock);
+ cnt = pag->pagb_count;
/*
* search pagb_list for this slot, skipping open slots. We have to
@@ -2578,8 +2578,9 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
* we have to get the most recent LSN for the log force to push out
* all the transactions that span the range.
*/
- for (cnt = 0; cnt < mp->m_perag[agno].pagb_count; cnt++) {
- bsy = &mp->m_perag[agno].pagb_list[cnt];
+ uend = bno + len - 1;
+ for (cnt = 0; cnt < pag->pagb_count; cnt++) {
+ bsy = &pag->pagb_list[cnt];
if (!bsy->busy_tp)
continue;
@@ -2591,7 +2592,8 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
lsn = bsy->busy_tp->t_commit_lsn;
}
- spin_unlock(&mp->m_perag[agno].pagb_lock);
+ spin_unlock(&pag->pagb_lock);
+ xfs_perag_put(pag);
trace_xfs_alloc_busysearch(tp->t_mountp, agno, bno, len, lsn);
/*
@@ -2599,5 +2601,5 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
* transaction that freed the block
*/
if (lsn)
- xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
+ xfs_log_force_lsn(tp->t_mountp, lsn, XFS_LOG_SYNC);
}
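
The xfs_alloc.c hunks above replace direct &mp->m_perag[agno] access under m_peraglock with paired xfs_perag_get()/xfs_perag_put() calls. A rough sketch of that reference-counted lookup pattern in isolation; struct ag, ag_get()/ag_put() and the flat table are assumptions for illustration, not the XFS implementation:

/*
 * Reference-counted per-group lookup: every get is paired with a put,
 * so the structure stays pinned while a caller operates on it.
 */
#include <stdatomic.h>

struct ag {
        atomic_int      ref;            /* active references to this group */
        long            freeblks;       /* example per-group state */
};

#define MAX_AGS 16
static struct ag *ag_table[MAX_AGS];    /* stand-in for the per-mount table */

static struct ag *ag_get(int agno)
{
        struct ag *pag = ag_table[agno];

        if (pag)
                atomic_fetch_add(&pag->ref, 1);  /* pin while in use */
        return pag;
}

static void ag_put(struct ag *pag)
{
        atomic_fetch_sub(&pag->ref, 1);          /* drop the pin */
}

/* Typical call site, mirroring the converted hunks above. */
static int ag_add_free(int agno, long len)
{
        struct ag *pag = ag_get(agno);

        if (!pag)
                return -1;
        pag->freeblks += len;
        ag_put(pag);
        return 0;
}
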
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index adbd9141aea1..b726e10d2c1c 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -61,12 +61,14 @@ xfs_allocbt_set_root(
struct xfs_agf *agf = XFS_BUF_TO_AGF(agbp);
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
int btnum = cur->bc_btnum;
+ struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno);
ASSERT(ptr->s != 0);
agf->agf_roots[btnum] = ptr->s;
be32_add_cpu(&agf->agf_levels[btnum], inc);
- cur->bc_mp->m_perag[seqno].pagf_levels[btnum] += inc;
+ pag->pagf_levels[btnum] += inc;
+ xfs_perag_put(pag);
xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
@@ -150,6 +152,7 @@ xfs_allocbt_update_lastrec(
{
struct xfs_agf *agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);
xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno);
+ struct xfs_perag *pag;
__be32 len;
int numrecs;
@@ -193,7 +196,9 @@ xfs_allocbt_update_lastrec(
}
agf->agf_longest = len;
- cur->bc_mp->m_perag[seqno].pagf_longest = be32_to_cpu(len);
+ pag = xfs_perag_get(cur->bc_mp, seqno);
+ pag->pagf_longest = be32_to_cpu(len);
+ xfs_perag_put(pag);
xfs_alloc_log_agf(cur->bc_tp, cur->bc_private.a.agbp, XFS_AGF_LONGEST);
}
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index e953b6cfb2a8..b9c196a53c42 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -93,12 +93,12 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
STATIC int
xfs_attr_name_to_xname(
struct xfs_name *xname,
- const char *aname)
+ const unsigned char *aname)
{
if (!aname)
return EINVAL;
xname->name = aname;
- xname->len = strlen(aname);
+ xname->len = strlen((char *)aname);
if (xname->len >= MAXNAMELEN)
return EFAULT; /* match IRIX behaviour */
@@ -124,7 +124,7 @@ STATIC int
xfs_attr_get_int(
struct xfs_inode *ip,
struct xfs_name *name,
- char *value,
+ unsigned char *value,
int *valuelenp,
int flags)
{
@@ -171,8 +171,8 @@ xfs_attr_get_int(
int
xfs_attr_get(
xfs_inode_t *ip,
- const char *name,
- char *value,
+ const unsigned char *name,
+ unsigned char *value,
int *valuelenp,
int flags)
{
@@ -197,7 +197,7 @@ xfs_attr_get(
/*
* Calculate how many blocks we need for the new attribute,
*/
-int
+STATIC int
xfs_attr_calc_size(
struct xfs_inode *ip,
int namelen,
@@ -235,8 +235,12 @@ xfs_attr_calc_size(
}
STATIC int
-xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
- char *value, int valuelen, int flags)
+xfs_attr_set_int(
+ struct xfs_inode *dp,
+ struct xfs_name *name,
+ unsigned char *value,
+ int valuelen,
+ int flags)
{
xfs_da_args_t args;
xfs_fsblock_t firstblock;
@@ -452,8 +456,8 @@ out:
int
xfs_attr_set(
xfs_inode_t *dp,
- const char *name,
- char *value,
+ const unsigned char *name,
+ unsigned char *value,
int valuelen,
int flags)
{
@@ -600,7 +604,7 @@ out:
int
xfs_attr_remove(
xfs_inode_t *dp,
- const char *name,
+ const unsigned char *name,
int flags)
{
int error;
@@ -669,9 +673,13 @@ xfs_attr_list_int(xfs_attr_list_context_t *context)
*/
/*ARGSUSED*/
STATIC int
-xfs_attr_put_listent(xfs_attr_list_context_t *context, int flags,
- char *name, int namelen,
- int valuelen, char *value)
+xfs_attr_put_listent(
+ xfs_attr_list_context_t *context,
+ int flags,
+ unsigned char *name,
+ int namelen,
+ int valuelen,
+ unsigned char *value)
{
struct attrlist *alist = (struct attrlist *)context->alist;
attrlist_ent_t *aep;
@@ -1980,7 +1988,7 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
xfs_bmbt_irec_t map[ATTR_RMTVALUE_MAPSIZE];
xfs_mount_t *mp;
xfs_daddr_t dblkno;
- xfs_caddr_t dst;
+ void *dst;
xfs_buf_t *bp;
int nmap, error, tmp, valuelen, blkcnt, i;
xfs_dablk_t lblkno;
@@ -2007,15 +2015,14 @@ xfs_attr_rmtval_get(xfs_da_args_t *args)
dblkno = XFS_FSB_TO_DADDR(mp, map[i].br_startblock);
blkcnt = XFS_FSB_TO_BB(mp, map[i].br_blockcount);
error = xfs_read_buf(mp, mp->m_ddev_targp, dblkno,
- blkcnt,
- XFS_BUF_LOCK | XBF_DONT_BLOCK,
+ blkcnt, XBF_LOCK | XBF_DONT_BLOCK,
&bp);
if (error)
return(error);
tmp = (valuelen < XFS_BUF_SIZE(bp))
? valuelen : XFS_BUF_SIZE(bp);
- xfs_biomove(bp, 0, tmp, dst, XFS_B_READ);
+ xfs_biomove(bp, 0, tmp, dst, XBF_READ);
xfs_buf_relse(bp);
dst += tmp;
valuelen -= tmp;
@@ -2039,7 +2046,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
xfs_inode_t *dp;
xfs_bmbt_irec_t map;
xfs_daddr_t dblkno;
- xfs_caddr_t src;
+ void *src;
xfs_buf_t *bp;
xfs_dablk_t lblkno;
int blkcnt, valuelen, nmap, error, tmp, committed;
@@ -2141,13 +2148,13 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
blkcnt = XFS_FSB_TO_BB(mp, map.br_blockcount);
bp = xfs_buf_get(mp->m_ddev_targp, dblkno, blkcnt,
- XFS_BUF_LOCK | XBF_DONT_BLOCK);
+ XBF_LOCK | XBF_DONT_BLOCK);
ASSERT(bp);
ASSERT(!XFS_BUF_GETERROR(bp));
tmp = (valuelen < XFS_BUF_SIZE(bp)) ? valuelen :
XFS_BUF_SIZE(bp);
- xfs_biomove(bp, 0, tmp, src, XFS_B_WRITE);
+ xfs_biomove(bp, 0, tmp, src, XBF_WRITE);
if (tmp < XFS_BUF_SIZE(bp))
xfs_biozero(bp, tmp, XFS_BUF_SIZE(bp) - tmp);
if ((error = xfs_bwrite(mp, bp))) {/* GROT: NOTE: synchronous write */
@@ -2208,8 +2215,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
/*
* If the "remote" value is in the cache, remove it.
*/
- bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt,
- XFS_INCORE_TRYLOCK);
+ bp = xfs_incore(mp->m_ddev_targp, dblkno, blkcnt, XBF_TRYLOCK);
if (bp) {
XFS_BUF_STALE(bp);
XFS_BUF_UNDELAYWRITE(bp);
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 59b410ce69a1..e920d68ef509 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -113,7 +113,7 @@ typedef struct attrlist_cursor_kern {
typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
- char *, int, int, char *);
+ unsigned char *, int, int, unsigned char *);
typedef struct xfs_attr_list_context {
struct xfs_inode *dp; /* inode */
@@ -139,7 +139,6 @@ typedef struct xfs_attr_list_context {
/*
* Overall external interface routines.
*/
-int xfs_attr_calc_size(struct xfs_inode *, int, int, int *);
int xfs_attr_inactive(struct xfs_inode *dp);
int xfs_attr_rmtval_get(struct xfs_da_args *args);
int xfs_attr_list_int(struct xfs_attr_list_context *);
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index baf41b5af756..a90ce74fc256 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -521,11 +521,11 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count; i++) {
- nargs.name = (char *)sfe->nameval;
+ nargs.name = sfe->nameval;
nargs.namelen = sfe->namelen;
- nargs.value = (char *)&sfe->nameval[nargs.namelen];
+ nargs.value = &sfe->nameval[nargs.namelen];
nargs.valuelen = sfe->valuelen;
- nargs.hashval = xfs_da_hashname((char *)sfe->nameval,
+ nargs.hashval = xfs_da_hashname(sfe->nameval,
sfe->namelen);
nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(sfe->flags);
error = xfs_attr_leaf_lookup_int(bp, &nargs); /* set a->index */
@@ -612,10 +612,10 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
error = context->put_listent(context,
sfe->flags,
- (char *)sfe->nameval,
+ sfe->nameval,
(int)sfe->namelen,
(int)sfe->valuelen,
- (char*)&sfe->nameval[sfe->namelen]);
+ &sfe->nameval[sfe->namelen]);
/*
* Either search callback finished early or
@@ -659,8 +659,8 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
}
sbp->entno = i;
- sbp->hash = xfs_da_hashname((char *)sfe->nameval, sfe->namelen);
- sbp->name = (char *)sfe->nameval;
+ sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
+ sbp->name = sfe->nameval;
sbp->namelen = sfe->namelen;
/* These are bytes, and both on-disk, don't endian-flip */
sbp->valuelen = sfe->valuelen;
@@ -818,9 +818,9 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
continue;
ASSERT(entry->flags & XFS_ATTR_LOCAL);
name_loc = xfs_attr_leaf_name_local(leaf, i);
- nargs.name = (char *)name_loc->nameval;
+ nargs.name = name_loc->nameval;
nargs.namelen = name_loc->namelen;
- nargs.value = (char *)&name_loc->nameval[nargs.namelen];
+ nargs.value = &name_loc->nameval[nargs.namelen];
nargs.valuelen = be16_to_cpu(name_loc->valuelen);
nargs.hashval = be32_to_cpu(entry->hashval);
nargs.flags = XFS_ATTR_NSP_ONDISK_TO_ARGS(entry->flags);
@@ -2370,10 +2370,10 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
retval = context->put_listent(context,
entry->flags,
- (char *)name_loc->nameval,
+ name_loc->nameval,
(int)name_loc->namelen,
be16_to_cpu(name_loc->valuelen),
- (char *)&name_loc->nameval[name_loc->namelen]);
+ &name_loc->nameval[name_loc->namelen]);
if (retval)
return retval;
} else {
@@ -2397,15 +2397,15 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
return retval;
retval = context->put_listent(context,
entry->flags,
- (char *)name_rmt->name,
+ name_rmt->name,
(int)name_rmt->namelen,
valuelen,
- (char*)args.value);
+ args.value);
kmem_free(args.value);
} else {
retval = context->put_listent(context,
entry->flags,
- (char *)name_rmt->name,
+ name_rmt->name,
(int)name_rmt->namelen,
valuelen,
NULL);
@@ -2950,7 +2950,7 @@ xfs_attr_leaf_freextent(xfs_trans_t **trans, xfs_inode_t *dp,
map.br_blockcount);
bp = xfs_trans_get_buf(*trans,
dp->i_mount->m_ddev_targp,
- dblkno, dblkcnt, XFS_BUF_LOCK);
+ dblkno, dblkcnt, XBF_LOCK);
xfs_trans_binval(*trans, bp);
/*
* Roll to next transaction.
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
index 76ab7b0cbb3a..919756e3ba53 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/xfs_attr_sf.h
@@ -52,7 +52,7 @@ typedef struct xfs_attr_sf_sort {
__uint8_t valuelen; /* length of value */
__uint8_t flags; /* flags bits (see xfs_attr_leaf.h) */
xfs_dahash_t hash; /* this entry's hash value */
- char *name; /* name value, pointer into buffer */
+ unsigned char *name; /* name value, pointer into buffer */
} xfs_attr_sf_sort_t;
#define XFS_ATTR_SF_ENTSIZE_BYNAME(nlen,vlen) /* space name/value uses */ \
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 98251cdc52aa..1869fb973819 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -2629,13 +2629,12 @@ xfs_bmap_btalloc(
if (startag == NULLAGNUMBER)
startag = ag = 0;
notinit = 0;
- down_read(&mp->m_peraglock);
+ pag = xfs_perag_get(mp, ag);
while (blen < ap->alen) {
- pag = &mp->m_perag[ag];
if (!pag->pagf_init &&
(error = xfs_alloc_pagf_init(mp, args.tp,
ag, XFS_ALLOC_FLAG_TRYLOCK))) {
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
return error;
}
/*
@@ -2667,13 +2666,13 @@ xfs_bmap_btalloc(
break;
error = xfs_filestream_new_ag(ap, &ag);
- if (error) {
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
+ if (error)
return error;
- }
/* loop again to set 'blen'*/
startag = NULLAGNUMBER;
+ pag = xfs_perag_get(mp, ag);
continue;
}
}
@@ -2681,8 +2680,10 @@ xfs_bmap_btalloc(
ag = 0;
if (ag == startag)
break;
+ xfs_perag_put(pag);
+ pag = xfs_perag_get(mp, ag);
}
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
/*
* Since the above loop did a BUF_TRYLOCK, it is
* possible that there is space for this request.
@@ -4470,7 +4471,7 @@ xfs_bmapi(
xfs_fsblock_t abno; /* allocated block number */
xfs_extlen_t alen; /* allocated extent length */
xfs_fileoff_t aoff; /* allocated file offset */
- xfs_bmalloca_t bma; /* args for xfs_bmap_alloc */
+ xfs_bmalloca_t bma = { 0 }; /* args for xfs_bmap_alloc */
xfs_btree_cur_t *cur; /* bmap btree cursor */
xfs_fileoff_t end; /* end of mapped file region */
int eof; /* we've hit the end of extents */
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index 38751d5fac6f..416e47e54b83 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -334,7 +334,7 @@ xfs_bmbt_disk_set_allf(
/*
* Set all the fields in a bmap extent record from the uncompressed form.
*/
-void
+STATIC void
xfs_bmbt_disk_set_all(
xfs_bmbt_rec_t *r,
xfs_bmbt_irec_t *s)
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index cf07ca7c22e7..0e66c4ea0f85 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -223,7 +223,6 @@ extern void xfs_bmbt_set_startblock(xfs_bmbt_rec_host_t *r, xfs_fsblock_t v);
extern void xfs_bmbt_set_startoff(xfs_bmbt_rec_host_t *r, xfs_fileoff_t v);
extern void xfs_bmbt_set_state(xfs_bmbt_rec_host_t *r, xfs_exntst_t v);
-extern void xfs_bmbt_disk_set_all(xfs_bmbt_rec_t *r, xfs_bmbt_irec_t *s);
extern void xfs_bmbt_disk_set_allf(xfs_bmbt_rec_t *r, xfs_fileoff_t o,
xfs_fsblock_t b, xfs_filblks_t c, xfs_exntst_t v);
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 36a0992dd669..96be4b0f2496 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -977,7 +977,7 @@ xfs_btree_get_buf_block(
xfs_daddr_t d;
/* need to sort out how callers deal with failures first */
- ASSERT(!(flags & XFS_BUF_TRYLOCK));
+ ASSERT(!(flags & XBF_TRYLOCK));
d = xfs_btree_ptr_to_daddr(cur, ptr);
*bpp = xfs_trans_get_buf(cur->bc_tp, mp->m_ddev_targp, d,
@@ -1008,7 +1008,7 @@ xfs_btree_read_buf_block(
int error;
/* need to sort out how callers deal with failures first */
- ASSERT(!(flags & XFS_BUF_TRYLOCK));
+ ASSERT(!(flags & XBF_TRYLOCK));
d = xfs_btree_ptr_to_daddr(cur, ptr);
error = xfs_trans_read_buf(mp, cur->bc_tp, mp->m_ddev_targp, d,
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a30f7e9eb2b9..f3c49e69eab9 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -250,7 +250,7 @@ xfs_buf_item_format(
((bip->bli_format.blf_map_size - 1) * sizeof(uint)));
vecp->i_addr = (xfs_caddr_t)&bip->bli_format;
vecp->i_len = base_size;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BFORMAT);
+ vecp->i_type = XLOG_REG_TYPE_BFORMAT;
vecp++;
nvecs = 1;
@@ -297,14 +297,14 @@ xfs_buf_item_format(
buffer_offset = first_bit * XFS_BLI_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLI_CHUNK;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
+ vecp->i_type = XLOG_REG_TYPE_BCHUNK;
nvecs++;
break;
} else if (next_bit != last_bit + 1) {
buffer_offset = first_bit * XFS_BLI_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLI_CHUNK;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
+ vecp->i_type = XLOG_REG_TYPE_BCHUNK;
nvecs++;
vecp++;
first_bit = next_bit;
@@ -316,7 +316,7 @@ xfs_buf_item_format(
buffer_offset = first_bit * XFS_BLI_CHUNK;
vecp->i_addr = xfs_buf_offset(bp, buffer_offset);
vecp->i_len = nbits * XFS_BLI_CHUNK;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_BCHUNK);
+ vecp->i_type = XLOG_REG_TYPE_BCHUNK;
/* You would think we need to bump the nvecs here too, but we do not
* this number is used by recovery, and it gets confused by the boundary
* split here
@@ -467,8 +467,10 @@ xfs_buf_item_unpin_remove(
/*
* This is called to attempt to lock the buffer associated with this
* buf log item. Don't sleep on the buffer lock. If we can't get
- * the lock right away, return 0. If we can get the lock, pull the
- * buffer from the free list, mark it busy, and return 1.
+ * the lock right away, return 0. If we can get the lock, take a
+ * reference to the buffer. If this is a delayed write buffer that
+ * needs AIL help to be written back, invoke the pushbuf routine
+ * rather than the normal success path.
*/
STATIC uint
xfs_buf_item_trylock(
@@ -477,24 +479,18 @@ xfs_buf_item_trylock(
xfs_buf_t *bp;
bp = bip->bli_buf;
-
- if (XFS_BUF_ISPINNED(bp)) {
+ if (XFS_BUF_ISPINNED(bp))
return XFS_ITEM_PINNED;
- }
-
- if (!XFS_BUF_CPSEMA(bp)) {
+ if (!XFS_BUF_CPSEMA(bp))
return XFS_ITEM_LOCKED;
- }
- /*
- * Remove the buffer from the free list. Only do this
- * if it's on the free list. Private buffers like the
- * superblock buffer are not.
- */
+ /* take a reference to the buffer. */
XFS_BUF_HOLD(bp);
ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
trace_xfs_buf_item_trylock(bip);
+ if (XFS_BUF_ISDELAYWRITE(bp))
+ return XFS_ITEM_PUSHBUF;
return XFS_ITEM_SUCCESS;
}
@@ -626,11 +622,9 @@ xfs_buf_item_committed(
}
/*
- * This is called to asynchronously write the buffer associated with this
- * buf log item out to disk. The buffer will already have been locked by
- * a successful call to xfs_buf_item_trylock(). If the buffer still has
- * B_DELWRI set, then get it going out to disk with a call to bawrite().
- * If not, then just release the buffer.
+ * The buffer is locked, but is not a delayed write buffer. This happens
+ * if we race with IO completion and hence we don't want to try to write it
+ * again. Just release the buffer.
*/
STATIC void
xfs_buf_item_push(
@@ -642,17 +636,29 @@ xfs_buf_item_push(
trace_xfs_buf_item_push(bip);
bp = bip->bli_buf;
+ ASSERT(!XFS_BUF_ISDELAYWRITE(bp));
+ xfs_buf_relse(bp);
+}
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- int error;
- error = xfs_bawrite(bip->bli_item.li_mountp, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, bip->bli_item.li_mountp,
- "xfs_buf_item_push: pushbuf error %d on bip %p, bp %p",
- error, bip, bp);
- } else {
- xfs_buf_relse(bp);
- }
+/*
+ * The buffer is locked and is a delayed write buffer. Promote the buffer
+ * in the delayed write queue as the caller knows that they must invoke
+ * the xfsbufd to get this buffer written. We have to unlock the buffer
+ * to allow the xfsbufd to write it, too.
+ */
+STATIC void
+xfs_buf_item_pushbuf(
+ xfs_buf_log_item_t *bip)
+{
+ xfs_buf_t *bp;
+
+ ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
+ trace_xfs_buf_item_pushbuf(bip);
+
+ bp = bip->bli_buf;
+ ASSERT(XFS_BUF_ISDELAYWRITE(bp));
+ xfs_buf_delwri_promote(bp);
+ xfs_buf_relse(bp);
}
/* ARGSUSED */
@@ -677,7 +683,7 @@ static struct xfs_item_ops xfs_buf_item_ops = {
.iop_committed = (xfs_lsn_t(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committed,
.iop_push = (void(*)(xfs_log_item_t*))xfs_buf_item_push,
- .iop_pushbuf = NULL,
+ .iop_pushbuf = (void(*)(xfs_log_item_t*))xfs_buf_item_pushbuf,
.iop_committing = (void(*)(xfs_log_item_t*, xfs_lsn_t))
xfs_buf_item_committing
};
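
With the change above, xfs_buf_item_trylock() reports XFS_ITEM_PUSHBUF for locked delayed-write buffers and the new xfs_buf_item_pushbuf() promotes them on the delwri queue. A self-contained skeleton of how a caller might dispatch on those return codes; the enum, struct log_item and item_*() helpers are placeholders, not the xfsaild implementation:

enum push_result { ITEM_SUCCESS, ITEM_PINNED, ITEM_LOCKED, ITEM_PUSHBUF };

struct log_item { int delwri; };

static enum push_result item_trylock(struct log_item *lip)
{
        /* pretend the item is lockable and already flushed to a delwri buffer */
        return lip->delwri ? ITEM_PUSHBUF : ITEM_SUCCESS;
}

static void item_push(struct log_item *lip)    { (void)lip; /* write the item */ }
static void item_pushbuf(struct log_item *lip) { (void)lip; /* promote its buffer */ }

static void push_one(struct log_item *lip, int *log_force_needed)
{
        switch (item_trylock(lip)) {
        case ITEM_SUCCESS:
                item_push(lip);                 /* item locked; write it back */
                break;
        case ITEM_PUSHBUF:
                item_pushbuf(lip);              /* already flushed; push the buffer */
                break;
        case ITEM_PINNED:
                *log_force_needed = 1;          /* needs a log force first */
                break;
        case ITEM_LOCKED:
                break;                          /* busy elsewhere; skip for now */
        }
}
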
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index c0c8869115b1..0ca556b4bf31 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1534,8 +1534,8 @@ xfs_da_hashname(const __uint8_t *name, int namelen)
enum xfs_dacmp
xfs_da_compname(
struct xfs_da_args *args,
- const char *name,
- int len)
+ const unsigned char *name,
+ int len)
{
return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 30cd08f56a3a..fe9f5a8c1d2a 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -209,7 +209,8 @@ typedef struct xfs_da_state {
*/
struct xfs_nameops {
xfs_dahash_t (*hashname)(struct xfs_name *);
- enum xfs_dacmp (*compname)(struct xfs_da_args *, const char *, int);
+ enum xfs_dacmp (*compname)(struct xfs_da_args *,
+ const unsigned char *, int);
};
@@ -260,7 +261,7 @@ int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
uint xfs_da_hashname(const __uint8_t *name_string, int name_length);
enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
- const char *name, int len);
+ const unsigned char *name, int len);
xfs_da_state_t *xfs_da_state_alloc(void);
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index d1483a4f71b8..cd27c9d6c71f 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -45,15 +45,21 @@
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
+
+static int xfs_swap_extents(
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip, /* tmp inode */
+ xfs_swapext_t *sxp);
+
/*
- * Syssgi interface for swapext
+ * ioctl interface for swapext
*/
int
xfs_swapext(
xfs_swapext_t *sxp)
{
xfs_inode_t *ip, *tip;
- struct file *file, *target_file;
+ struct file *file, *tmp_file;
int error = 0;
/* Pull information for the target fd */
@@ -68,56 +74,128 @@ xfs_swapext(
goto out_put_file;
}
- target_file = fget((int)sxp->sx_fdtmp);
- if (!target_file) {
+ tmp_file = fget((int)sxp->sx_fdtmp);
+ if (!tmp_file) {
error = XFS_ERROR(EINVAL);
goto out_put_file;
}
- if (!(target_file->f_mode & FMODE_WRITE) ||
- (target_file->f_flags & O_APPEND)) {
+ if (!(tmp_file->f_mode & FMODE_WRITE) ||
+ (tmp_file->f_flags & O_APPEND)) {
error = XFS_ERROR(EBADF);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
if (IS_SWAPFILE(file->f_path.dentry->d_inode) ||
- IS_SWAPFILE(target_file->f_path.dentry->d_inode)) {
+ IS_SWAPFILE(tmp_file->f_path.dentry->d_inode)) {
error = XFS_ERROR(EINVAL);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
ip = XFS_I(file->f_path.dentry->d_inode);
- tip = XFS_I(target_file->f_path.dentry->d_inode);
+ tip = XFS_I(tmp_file->f_path.dentry->d_inode);
if (ip->i_mount != tip->i_mount) {
error = XFS_ERROR(EINVAL);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
if (ip->i_ino == tip->i_ino) {
error = XFS_ERROR(EINVAL);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
error = XFS_ERROR(EIO);
- goto out_put_target_file;
+ goto out_put_tmp_file;
}
error = xfs_swap_extents(ip, tip, sxp);
- out_put_target_file:
- fput(target_file);
+ out_put_tmp_file:
+ fput(tmp_file);
out_put_file:
fput(file);
out:
return error;
}
-int
+/*
+ * We need to check that the format of the data fork in the temporary inode is
+ * valid for the target inode before doing the swap. This is not a problem with
+ * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
+ * data fork depending on the space the attribute fork is taking so we can get
+ * invalid formats on the target inode.
+ *
+ * E.g. target has space for 7 extents in extent format, temp inode only has
+ * space for 6. If we defragment down to 7 extents, then the tmp format is a
+ * btree, but when swapped it needs to be in extent format. Hence we can't just
+ * blindly swap data forks on attr2 filesystems.
+ *
+ * Note that we check the swap in both directions so that we don't end up with
+ * a corrupt temporary inode, either.
+ *
+ * Note that fixing the way xfs_fsr sets up the attribute fork in the source
+ * inode will prevent this situation from occurring, so all we do here is
+ * reject and log the attempt. basically we are putting the responsibility on
+ * userspace to get this right.
+ */
+static int
+xfs_swap_extents_check_format(
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip) /* tmp inode */
+{
+
+ /* Should never get a local format */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
+ tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+ return EINVAL;
+
+ /*
+ * if the target inode has fewer extents than the temporary inode then
+ * why did userspace call us?
+ */
+ if (ip->i_d.di_nextents < tip->i_d.di_nextents)
+ return EINVAL;
+
+ /*
+ * if the target inode is in extent form and the temp inode is in btree
+ * form then we will end up with the target inode in the wrong format
+ * as we already know there are fewer extents in the temp inode.
+ */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ return EINVAL;
+
+ /* Check temp in extent form to max in target */
+ if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > ip->i_df.if_ext_max)
+ return EINVAL;
+
+ /* Check target in extent form to max in temp */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max)
+ return EINVAL;
+
+ /* Check root block of temp in btree form to max in target */
+ if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+ XFS_IFORK_BOFF(ip) &&
+ tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip))
+ return EINVAL;
+
+ /* Check root block of target in btree form to max in temp */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+ XFS_IFORK_BOFF(tip) &&
+ ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip))
+ return EINVAL;
+
+ return 0;
+}
+
+static int
xfs_swap_extents(
- xfs_inode_t *ip,
- xfs_inode_t *tip,
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip, /* tmp inode */
xfs_swapext_t *sxp)
{
xfs_mount_t *mp;
@@ -161,13 +239,6 @@ xfs_swap_extents(
goto out_unlock;
}
- /* Should never get a local format */
- if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
- tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- error = XFS_ERROR(EINVAL);
- goto out_unlock;
- }
-
if (VN_CACHED(VFS_I(tip)) != 0) {
error = xfs_flushinval_pages(tip, 0, -1,
FI_REMAPF_LOCKED);
@@ -189,13 +260,15 @@ xfs_swap_extents(
goto out_unlock;
}
- /*
- * If the target has extended attributes, the tmp file
- * must also in order to ensure the correct data fork
- * format.
- */
- if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) {
- error = XFS_ERROR(EINVAL);
+ trace_xfs_swap_extent_before(ip, 0);
+ trace_xfs_swap_extent_before(tip, 1);
+
+ /* check inode formats now that data is flushed */
+ error = xfs_swap_extents_check_format(ip, tip);
+ if (error) {
+ xfs_fs_cmn_err(CE_NOTE, mp,
+ "%s: inode 0x%llx format is incompatible for exchanging.",
+ __FILE__, ip->i_ino);
goto out_unlock;
}
@@ -276,6 +349,16 @@ xfs_swap_extents(
*tifp = *tempifp; /* struct copy */
/*
+ * Fix the in-memory data fork values that are dependent on the fork
+ * offset in the inode. We can't assume they remain the same as attr2
+ * has dynamic fork offsets.
+ */
+ ifp->if_ext_max = XFS_IFORK_SIZE(ip, XFS_DATA_FORK) /
+ (uint)sizeof(xfs_bmbt_rec_t);
+ tifp->if_ext_max = XFS_IFORK_SIZE(tip, XFS_DATA_FORK) /
+ (uint)sizeof(xfs_bmbt_rec_t);
+
+ /*
* Fix the on-disk inode values
*/
tmp = (__uint64_t)ip->i_d.di_nblocks;
@@ -347,6 +430,8 @@ xfs_swap_extents(
error = xfs_trans_commit(tp, XFS_TRANS_SWAPEXT);
+ trace_xfs_swap_extent_after(ip, 0);
+ trace_xfs_swap_extent_after(tip, 1);
out:
kmem_free(tempifp);
return error;
diff --git a/fs/xfs/xfs_dfrag.h b/fs/xfs/xfs_dfrag.h
index 4f55a6306558..20bdd935c121 100644
--- a/fs/xfs/xfs_dfrag.h
+++ b/fs/xfs/xfs_dfrag.h
@@ -48,9 +48,6 @@ typedef struct xfs_swapext
*/
int xfs_swapext(struct xfs_swapext *sx);
-int xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
- struct xfs_swapext *sxp);
-
#endif /* __KERNEL__ */
#endif /* __XFS_DFRAG_H__ */
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 93634a7e90e9..42520f041265 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -44,7 +44,7 @@
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
-struct xfs_name xfs_name_dotdot = {"..", 2};
+struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2};
/*
* ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -66,8 +66,8 @@ xfs_ascii_ci_hashname(
STATIC enum xfs_dacmp
xfs_ascii_ci_compname(
struct xfs_da_args *args,
- const char *name,
- int len)
+ const unsigned char *name,
+ int len)
{
enum xfs_dacmp result;
int i;
@@ -247,7 +247,7 @@ xfs_dir_createname(
int
xfs_dir_cilookup_result(
struct xfs_da_args *args,
- const char *name,
+ const unsigned char *name,
int len)
{
if (args->cmpresult == XFS_CMP_DIFFERENT)
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 1d9ef96f33aa..74a3b1057685 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -100,7 +100,7 @@ extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp,
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_dabuf *bp);
-extern int xfs_dir_cilookup_result(struct xfs_da_args *args, const char *name,
- int len);
+extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
+ const unsigned char *name, int len);
#endif /* __XFS_DIR2_H__ */
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index ddc4ecc7807f..779a267b0a84 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -57,8 +57,8 @@ static xfs_dahash_t xfs_dir_hash_dot, xfs_dir_hash_dotdot;
void
xfs_dir_startup(void)
{
- xfs_dir_hash_dot = xfs_da_hashname(".", 1);
- xfs_dir_hash_dotdot = xfs_da_hashname("..", 2);
+ xfs_dir_hash_dot = xfs_da_hashname((unsigned char *)".", 1);
+ xfs_dir_hash_dotdot = xfs_da_hashname((unsigned char *)"..", 2);
}
/*
@@ -513,8 +513,9 @@ xfs_dir2_block_getdents(
/*
* If it didn't fit, set the final offset to here & return.
*/
- if (filldir(dirent, dep->name, dep->namelen, cook & 0x7fffffff,
- be64_to_cpu(dep->inumber), DT_UNKNOWN)) {
+ if (filldir(dirent, (char *)dep->name, dep->namelen,
+ cook & 0x7fffffff, be64_to_cpu(dep->inumber),
+ DT_UNKNOWN)) {
*offset = cook & 0x7fffffff;
xfs_da_brelse(NULL, bp);
return 0;
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 29f484c11b3a..e2d89854ec9e 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -1081,7 +1081,7 @@ xfs_dir2_leaf_getdents(
dep = (xfs_dir2_data_entry_t *)ptr;
length = xfs_dir2_data_entsize(dep->namelen);
- if (filldir(dirent, dep->name, dep->namelen,
+ if (filldir(dirent, (char *)dep->name, dep->namelen,
xfs_dir2_byte_to_dataptr(mp, curoff) & 0x7fffffff,
be64_to_cpu(dep->inumber), DT_UNKNOWN))
break;
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index ce6e355199b5..78fc4d9ae756 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -65,7 +65,7 @@ static int xfs_dir2_node_addname_int(xfs_da_args_t *args,
/*
* Log entries from a freespace block.
*/
-void
+STATIC void
xfs_dir2_free_log_bests(
xfs_trans_t *tp, /* transaction pointer */
xfs_dabuf_t *bp, /* freespace buffer */
diff --git a/fs/xfs/xfs_dir2_node.h b/fs/xfs/xfs_dir2_node.h
index dde72db3d695..82dfe7147195 100644
--- a/fs/xfs/xfs_dir2_node.h
+++ b/fs/xfs/xfs_dir2_node.h
@@ -75,8 +75,6 @@ xfs_dir2_db_to_fdindex(struct xfs_mount *mp, xfs_dir2_db_t db)
return ((db) % XFS_DIR2_MAX_FREE_BESTS(mp));
}
-extern void xfs_dir2_free_log_bests(struct xfs_trans *tp, struct xfs_dabuf *bp,
- int first, int last);
extern int xfs_dir2_leaf_to_node(struct xfs_da_args *args,
struct xfs_dabuf *lbp);
extern xfs_dahash_t xfs_dir2_leafn_lasthash(struct xfs_dabuf *bp, int *count);
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index 9d4f17a69676..c1a5945d463a 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -782,7 +782,7 @@ xfs_dir2_sf_getdents(
}
ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
- if (filldir(dirent, sfep->name, sfep->namelen,
+ if (filldir(dirent, (char *)sfep->name, sfep->namelen,
off & 0x7fffffff, ino, DT_UNKNOWN)) {
*offset = off & 0x7fffffff;
return 0;
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 05a4bdd4be39..6f35ed1b39b9 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -82,7 +82,7 @@ xfs_efi_item_format(xfs_efi_log_item_t *efip,
log_vector->i_addr = (xfs_caddr_t)&(efip->efi_format);
log_vector->i_len = size;
- XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFI_FORMAT);
+ log_vector->i_type = XLOG_REG_TYPE_EFI_FORMAT;
ASSERT(size >= sizeof(xfs_efi_log_format_t));
}
@@ -406,7 +406,7 @@ xfs_efd_item_format(xfs_efd_log_item_t *efdp,
log_vector->i_addr = (xfs_caddr_t)&(efdp->efd_format);
log_vector->i_len = size;
- XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_EFD_FORMAT);
+ log_vector->i_type = XLOG_REG_TYPE_EFD_FORMAT;
ASSERT(size >= sizeof(xfs_efd_log_format_t));
}
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index a631e1451abb..390850ee6603 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -140,6 +140,7 @@ _xfs_filestream_pick_ag(
int flags,
xfs_extlen_t minlen)
{
+ int streams, max_streams;
int err, trylock, nscan;
xfs_extlen_t longest, free, minfree, maxfree = 0;
xfs_agnumber_t ag, max_ag = NULLAGNUMBER;
@@ -155,15 +156,15 @@ _xfs_filestream_pick_ag(
trylock = XFS_ALLOC_FLAG_TRYLOCK;
for (nscan = 0; 1; nscan++) {
-
- TRACE_AG_SCAN(mp, ag, xfs_filestream_peek_ag(mp, ag));
-
- pag = mp->m_perag + ag;
+ pag = xfs_perag_get(mp, ag);
+ TRACE_AG_SCAN(mp, ag, atomic_read(&pag->pagf_fstrms));
if (!pag->pagf_init) {
err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
- if (err && !trylock)
+ if (err && !trylock) {
+ xfs_perag_put(pag);
return err;
+ }
}
/* Might fail sometimes during the 1st pass with trylock set. */
@@ -173,6 +174,7 @@ _xfs_filestream_pick_ag(
/* Keep track of the AG with the most free blocks. */
if (pag->pagf_freeblks > maxfree) {
maxfree = pag->pagf_freeblks;
+ max_streams = atomic_read(&pag->pagf_fstrms);
max_ag = ag;
}
@@ -195,6 +197,8 @@ _xfs_filestream_pick_ag(
/* Break out, retaining the reference on the AG. */
free = pag->pagf_freeblks;
+ streams = atomic_read(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
*agp = ag;
break;
}
@@ -202,6 +206,7 @@ _xfs_filestream_pick_ag(
/* Drop the reference on this AG, it's not usable. */
xfs_filestream_put_ag(mp, ag);
next_ag:
+ xfs_perag_put(pag);
/* Move to the next AG, wrapping to AG 0 if necessary. */
if (++ag >= mp->m_sb.sb_agcount)
ag = 0;
@@ -229,6 +234,7 @@ next_ag:
if (max_ag != NULLAGNUMBER) {
xfs_filestream_get_ag(mp, max_ag);
TRACE_AG_PICK1(mp, max_ag, maxfree);
+ streams = max_streams;
free = maxfree;
*agp = max_ag;
break;
@@ -240,16 +246,14 @@ next_ag:
return 0;
}
- TRACE_AG_PICK2(mp, startag, *agp, xfs_filestream_peek_ag(mp, *agp),
- free, nscan, flags);
+ TRACE_AG_PICK2(mp, startag, *agp, streams, free, nscan, flags);
return 0;
}
/*
* Set the allocation group number for a file or a directory, updating inode
- * references and per-AG references as appropriate. Must be called with the
- * m_peraglock held in read mode.
+ * references and per-AG references as appropriate.
*/
static int
_xfs_filestream_update_ag(
@@ -451,20 +455,6 @@ xfs_filestream_unmount(
}
/*
- * If the mount point's m_perag array is going to be reallocated, all
- * outstanding cache entries must be flushed to avoid accessing reference count
- * addresses that have been freed. The call to xfs_filestream_flush() must be
- * made inside the block that holds the m_peraglock in write mode to do the
- * reallocation.
- */
-void
-xfs_filestream_flush(
- xfs_mount_t *mp)
-{
- xfs_mru_cache_flush(mp->m_filestream);
-}
-
-/*
* Return the AG of the filestream the file or directory belongs to, or
* NULLAGNUMBER otherwise.
*/
@@ -526,7 +516,6 @@ xfs_filestream_associate(
mp = pip->i_mount;
cache = mp->m_filestream;
- down_read(&mp->m_peraglock);
/*
* We have a problem, Houston.
@@ -543,10 +532,8 @@ xfs_filestream_associate(
*
* So, if we can't get the iolock without sleeping then just give up
*/
- if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL)) {
- up_read(&mp->m_peraglock);
+ if (!xfs_ilock_nowait(pip, XFS_IOLOCK_EXCL))
return 1;
- }
/* If the parent directory is already in the cache, use its AG. */
item = xfs_mru_cache_lookup(cache, pip->i_ino);
@@ -601,7 +588,6 @@ exit_did_pick:
exit:
xfs_iunlock(pip, XFS_IOLOCK_EXCL);
- up_read(&mp->m_peraglock);
return -err;
}
diff --git a/fs/xfs/xfs_filestream.h b/fs/xfs/xfs_filestream.h
index 4aba67c5f64f..260f757bbc5d 100644
--- a/fs/xfs/xfs_filestream.h
+++ b/fs/xfs/xfs_filestream.h
@@ -79,12 +79,21 @@ extern ktrace_t *xfs_filestreams_trace_buf;
* the cache that reference per-ag array elements that have since been
* reallocated.
*/
+/*
+ * xfs_filestream_peek_ag is only used in tracing code
+ */
static inline int
xfs_filestream_peek_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
- return atomic_read(&mp->m_perag[agno].pagf_fstrms);
+ struct xfs_perag *pag;
+ int ret;
+
+ pag = xfs_perag_get(mp, agno);
+ ret = atomic_read(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
+ return ret;
}
static inline int
@@ -92,7 +101,13 @@ xfs_filestream_get_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
- return atomic_inc_return(&mp->m_perag[agno].pagf_fstrms);
+ struct xfs_perag *pag;
+ int ret;
+
+ pag = xfs_perag_get(mp, agno);
+ ret = atomic_inc_return(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
+ return ret;
}
static inline int
@@ -100,7 +115,13 @@ xfs_filestream_put_ag(
xfs_mount_t *mp,
xfs_agnumber_t agno)
{
- return atomic_dec_return(&mp->m_perag[agno].pagf_fstrms);
+ struct xfs_perag *pag;
+ int ret;
+
+ pag = xfs_perag_get(mp, agno);
+ ret = atomic_dec_return(&pag->pagf_fstrms);
+ xfs_perag_put(pag);
+ return ret;
}
/* allocation selection flags */
@@ -114,7 +135,6 @@ int xfs_filestream_init(void);
void xfs_filestream_uninit(void);
int xfs_filestream_mount(struct xfs_mount *mp);
void xfs_filestream_unmount(struct xfs_mount *mp);
-void xfs_filestream_flush(struct xfs_mount *mp);
xfs_agnumber_t xfs_filestream_lookup_ag(struct xfs_inode *ip);
int xfs_filestream_associate(struct xfs_inode *dip, struct xfs_inode *ip);
void xfs_filestream_deassociate(struct xfs_inode *ip);
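The conversion running through these filestream hunks replaces global m_peraglock/m_perag array access with counted per-AG references. A hedged before/after sketch, not part of the patch (declarations omitted; 'streams' is an illustrative local):

	/* Before: global rwsem protecting the flat per-AG array */
	down_read(&mp->m_peraglock);
	streams = atomic_read(&mp->m_perag[agno].pagf_fstrms);
	up_read(&mp->m_peraglock);

	/* After: a counted reference pins the per-AG structure instead */
	pag = xfs_perag_get(mp, agno);
	streams = atomic_read(&pag->pagf_fstrms);
	xfs_perag_put(pag);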
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a13919a6a364..37a6f62c57b6 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -167,27 +167,14 @@ xfs_growfs_data_private(
}
new = nb - mp->m_sb.sb_dblocks;
oagcount = mp->m_sb.sb_agcount;
- if (nagcount > oagcount) {
- void *new_perag, *old_perag;
-
- xfs_filestream_flush(mp);
-
- new_perag = kmem_zalloc(sizeof(xfs_perag_t) * nagcount,
- KM_MAYFAIL);
- if (!new_perag)
- return XFS_ERROR(ENOMEM);
-
- down_write(&mp->m_peraglock);
- memcpy(new_perag, mp->m_perag, sizeof(xfs_perag_t) * oagcount);
- old_perag = mp->m_perag;
- mp->m_perag = new_perag;
-
- mp->m_flags |= XFS_MOUNT_32BITINODES;
- nagimax = xfs_initialize_perag(mp, nagcount);
- up_write(&mp->m_peraglock);
- kmem_free(old_perag);
+ /* allocate the new per-ag structures */
+ if (nagcount > oagcount) {
+ error = xfs_initialize_perag(mp, nagcount, &nagimax);
+ if (error)
+ return error;
}
+
tp = xfs_trans_alloc(mp, XFS_TRANS_GROWFS);
tp->t_flags |= XFS_TRANS_RESERVE;
if ((error = xfs_trans_reserve(tp, XFS_GROWFS_SPACE_RES(mp),
@@ -196,6 +183,11 @@ xfs_growfs_data_private(
return error;
}
+ /*
+ * Write new AG headers to disk. Non-transactional, but written
+ * synchronously so they are completed prior to the growfs transaction
+ * being logged.
+ */
nfree = 0;
for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
/*
@@ -359,6 +351,12 @@ xfs_growfs_data_private(
goto error0;
}
}
+
+ /*
+ * Update changed superblock fields transactionally. These are not
+ * seen by the rest of the world until the transaction commit applies
+ * them atomically to the superblock.
+ */
if (nagcount > oagcount)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount);
if (nb > mp->m_sb.sb_dblocks)
@@ -369,9 +367,9 @@ xfs_growfs_data_private(
if (dpct)
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct);
error = xfs_trans_commit(tp, 0);
- if (error) {
+ if (error)
return error;
- }
+
/* New allocation groups fully initialized, so update mount struct */
if (nagimax)
mp->m_maxagi = nagimax;
@@ -381,6 +379,8 @@ xfs_growfs_data_private(
mp->m_maxicount = icount << mp->m_sb.sb_inopblog;
} else
mp->m_maxicount = 0;
+
+ /* update secondary superblocks. */
for (agno = 1; agno < nagcount; agno++) {
error = xfs_read_buf(mp, mp->m_ddev_targp,
XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index cb907ba69c4c..9d884c127bb9 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -205,7 +205,7 @@ xfs_ialloc_inode_init(
d = XFS_AGB_TO_DADDR(mp, agno, agbno + (j * blks_per_cluster));
fbuf = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
mp->m_bsize * blks_per_cluster,
- XFS_BUF_LOCK);
+ XBF_LOCK);
ASSERT(fbuf);
ASSERT(!XFS_BUF_GETERROR(fbuf));
@@ -253,6 +253,7 @@ xfs_ialloc_ag_alloc(
xfs_agino_t thisino; /* current inode number, for loop */
int isaligned = 0; /* inode allocation at stripe unit */
/* boundary */
+ struct xfs_perag *pag;
args.tp = tp;
args.mp = tp->t_mountp;
@@ -382,9 +383,9 @@ xfs_ialloc_ag_alloc(
newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
be32_add_cpu(&agi->agi_count, newlen);
be32_add_cpu(&agi->agi_freecount, newlen);
- down_read(&args.mp->m_peraglock);
- args.mp->m_perag[agno].pagi_freecount += newlen;
- up_read(&args.mp->m_peraglock);
+ pag = xfs_perag_get(args.mp, agno);
+ pag->pagi_freecount += newlen;
+ xfs_perag_put(pag);
agi->agi_newino = cpu_to_be32(newino);
/*
@@ -486,9 +487,8 @@ xfs_ialloc_ag_select(
*/
agno = pagno;
flags = XFS_ALLOC_FLAG_TRYLOCK;
- down_read(&mp->m_peraglock);
for (;;) {
- pag = &mp->m_perag[agno];
+ pag = xfs_perag_get(mp, agno);
if (!pag->pagi_init) {
if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
agbp = NULL;
@@ -527,7 +527,7 @@ xfs_ialloc_ag_select(
agbp = NULL;
goto nextag;
}
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
return agbp;
}
}
@@ -535,22 +535,19 @@ unlock_nextag:
if (agbp)
xfs_trans_brelse(tp, agbp);
nextag:
+ xfs_perag_put(pag);
/*
* No point in iterating over the rest, if we're shutting
* down.
*/
- if (XFS_FORCED_SHUTDOWN(mp)) {
- up_read(&mp->m_peraglock);
+ if (XFS_FORCED_SHUTDOWN(mp))
return NULL;
- }
agno++;
if (agno >= agcount)
agno = 0;
if (agno == pagno) {
- if (flags == 0) {
- up_read(&mp->m_peraglock);
+ if (flags == 0)
return NULL;
- }
flags = 0;
}
}
@@ -672,6 +669,7 @@ xfs_dialloc(
xfs_agnumber_t tagno; /* testing allocation group number */
xfs_btree_cur_t *tcur; /* temp cursor */
xfs_inobt_rec_incore_t trec; /* temp inode allocation record */
+ struct xfs_perag *pag;
if (*IO_agbp == NULL) {
@@ -771,13 +769,13 @@ nextag:
*inop = NULLFSINO;
return noroom ? ENOSPC : 0;
}
- down_read(&mp->m_peraglock);
- if (mp->m_perag[tagno].pagi_inodeok == 0) {
- up_read(&mp->m_peraglock);
+ pag = xfs_perag_get(mp, tagno);
+ if (pag->pagi_inodeok == 0) {
+ xfs_perag_put(pag);
goto nextag;
}
error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
- up_read(&mp->m_peraglock);
+ xfs_perag_put(pag);
if (error)
goto nextag;
agi = XFS_BUF_TO_AGI(agbp);
@@ -790,6 +788,7 @@ nextag:
*/
agno = tagno;
*IO_agbp = NULL;
+ pag = xfs_perag_get(mp, agno);
restart_pagno:
cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno));
@@ -808,7 +807,6 @@ nextag:
* If in the same AG as the parent, try to get near the parent.
*/
if (pagno == agno) {
- xfs_perag_t *pag = &mp->m_perag[agno];
int doneleft; /* done, to the left */
int doneright; /* done, to the right */
int searchdistance = 10;
@@ -1006,9 +1004,7 @@ alloc_inode:
goto error0;
be32_add_cpu(&agi->agi_freecount, -1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
- down_read(&mp->m_peraglock);
- mp->m_perag[tagno].pagi_freecount--;
- up_read(&mp->m_peraglock);
+ pag->pagi_freecount--;
error = xfs_check_agi_freecount(cur, agi);
if (error)
@@ -1016,12 +1012,14 @@ alloc_inode:
xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
+ xfs_perag_put(pag);
*inop = ino;
return 0;
error1:
xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+ xfs_perag_put(pag);
return error;
}
@@ -1052,6 +1050,7 @@ xfs_difree(
xfs_mount_t *mp; /* mount structure for filesystem */
int off; /* offset of inode in inode chunk */
xfs_inobt_rec_incore_t rec; /* btree record */
+ struct xfs_perag *pag;
mp = tp->t_mountp;
@@ -1088,9 +1087,7 @@ xfs_difree(
/*
* Get the allocation group header.
*/
- down_read(&mp->m_peraglock);
error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
- up_read(&mp->m_peraglock);
if (error) {
cmn_err(CE_WARN,
"xfs_difree: xfs_ialloc_read_agi() returned an error %d on %s. Returning error.",
@@ -1157,9 +1154,9 @@ xfs_difree(
be32_add_cpu(&agi->agi_count, -ilen);
be32_add_cpu(&agi->agi_freecount, -(ilen - 1));
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT);
- down_read(&mp->m_peraglock);
- mp->m_perag[agno].pagi_freecount -= ilen - 1;
- up_read(&mp->m_peraglock);
+ pag = xfs_perag_get(mp, agno);
+ pag->pagi_freecount -= ilen - 1;
+ xfs_perag_put(pag);
xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen);
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1));
@@ -1188,9 +1185,9 @@ xfs_difree(
*/
be32_add_cpu(&agi->agi_freecount, 1);
xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
- down_read(&mp->m_peraglock);
- mp->m_perag[agno].pagi_freecount++;
- up_read(&mp->m_peraglock);
+ pag = xfs_perag_get(mp, agno);
+ pag->pagi_freecount++;
+ xfs_perag_put(pag);
xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1);
}
@@ -1312,9 +1309,7 @@ xfs_imap(
xfs_buf_t *agbp; /* agi buffer */
int i; /* temp state */
- down_read(&mp->m_peraglock);
error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
- up_read(&mp->m_peraglock);
if (error) {
xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
"xfs_ialloc_read_agi() returned "
@@ -1379,7 +1374,6 @@ xfs_imap(
XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
return XFS_ERROR(EINVAL);
}
-
return 0;
}
@@ -1523,8 +1517,7 @@ xfs_ialloc_read_agi(
return error;
agi = XFS_BUF_TO_AGI(*bpp);
- pag = &mp->m_perag[agno];
-
+ pag = xfs_perag_get(mp, agno);
if (!pag->pagi_init) {
pag->pagi_freecount = be32_to_cpu(agi->agi_freecount);
pag->pagi_count = be32_to_cpu(agi->agi_count);
@@ -1537,6 +1530,7 @@ xfs_ialloc_read_agi(
*/
ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) ||
XFS_FORCED_SHUTDOWN(mp));
+ xfs_perag_put(pag);
return 0;
}
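The xfs_iget.c and xfs_inode.c hunks below also drop the old inode-number keyed helpers in favour of the agno-based accessors. A hedged sketch of that rename (the placeholder comments mark elided bodies):

	/* Old helpers, removed by this series: keyed by inode number */
	pag = xfs_get_perag(mp, ip->i_ino);
	/* ... use pag ... */
	xfs_put_perag(mp, pag);

	/* New accessors: callers convert the inode number to an AG number */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	/* ... use pag ... */
	xfs_perag_put(pag);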
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index fa402a6bbbcf..e281eb4a1c49 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -73,7 +73,6 @@ xfs_inode_alloc(
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(completion_done(&ip->i_flush));
- ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
@@ -375,7 +374,7 @@ xfs_iget(
return EINVAL;
/* get the perag structure and ensure that it's inode capable */
- pag = xfs_get_perag(mp, ino);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
if (!pag->pagi_inodeok)
return EINVAL;
ASSERT(pag->pag_ici_init);
@@ -399,7 +398,7 @@ again:
if (error)
goto out_error_or_again;
}
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
*ipp = ip;
@@ -418,7 +417,7 @@ out_error_or_again:
delay(1);
goto again;
}
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
return error;
}
@@ -489,12 +488,12 @@ xfs_ireclaim(
* added to the tree assert that it's been there before to catch
* problems with the inode life time early on.
*/
- pag = xfs_get_perag(mp, ip->i_ino);
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
write_lock(&pag->pag_ici_lock);
if (!radix_tree_delete(&pag->pag_ici_root, agino))
ASSERT(0);
write_unlock(&pag->pag_ici_lock);
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
/*
* Here we do an (almost) spurious inode lock in order to coordinate
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 391d36b0e68c..fa31360046d4 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -151,7 +151,7 @@ xfs_imap_to_bp(
"an error %d on %s. Returning error.",
error, mp->m_fsname);
} else {
- ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+ ASSERT(buf_flags & XBF_TRYLOCK);
}
return error;
}
@@ -239,7 +239,7 @@ xfs_inotobp(
if (error)
return error;
- error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, imap_flags);
+ error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);
if (error)
return error;
@@ -285,7 +285,7 @@ xfs_itobp(
return error;
if (!bp) {
- ASSERT(buf_flags & XFS_BUF_TRYLOCK);
+ ASSERT(buf_flags & XBF_TRYLOCK);
ASSERT(tp == NULL);
*bpp = NULL;
return EAGAIN;
@@ -807,7 +807,7 @@ xfs_iread(
* Get pointers to the on-disk inode and the buffer containing it.
*/
error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
- XFS_BUF_LOCK, iget_flags);
+ XBF_LOCK, iget_flags);
if (error)
return error;
dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
@@ -1751,7 +1751,7 @@ xfs_iunlink(
* Here we put the head pointer into our next pointer,
* and then we fall through to point the head at us.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
if (error)
return error;
@@ -1833,7 +1833,7 @@ xfs_iunlink_remove(
* of dealing with the buffer when there is no need to
* change it.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
if (error) {
cmn_err(CE_WARN,
"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -1895,7 +1895,7 @@ xfs_iunlink_remove(
* Now last_ibp points to the buffer previous to us on
* the unlinked list. Pull us from the list.
*/
- error = xfs_itobp(mp, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
if (error) {
cmn_err(CE_WARN,
"xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
@@ -1946,8 +1946,9 @@ xfs_ifree_cluster(
xfs_inode_t *ip, **ip_found;
xfs_inode_log_item_t *iip;
xfs_log_item_t *lip;
- xfs_perag_t *pag = xfs_get_perag(mp, inum);
+ struct xfs_perag *pag;
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
blks_per_cluster = 1;
ninodes = mp->m_sb.sb_inopblock;
@@ -2039,7 +2040,7 @@ xfs_ifree_cluster(
bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
mp->m_bsize * blks_per_cluster,
- XFS_BUF_LOCK);
+ XBF_LOCK);
pre_flushed = 0;
lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
@@ -2088,7 +2089,7 @@ xfs_ifree_cluster(
}
kmem_free(ip_found);
- xfs_put_perag(mp, pag);
+ xfs_perag_put(pag);
}
/*
@@ -2150,7 +2151,7 @@ xfs_ifree(
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);
if (error)
return error;
@@ -2483,13 +2484,16 @@ __xfs_iunpin_wait(
return;
/* Give the log a push to start the unpinning I/O */
- xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
- iip->ili_last_lsn : 0, XFS_LOG_FORCE);
+ if (iip && iip->ili_last_lsn)
+ xfs_log_force_lsn(ip->i_mount, iip->ili_last_lsn, 0);
+ else
+ xfs_log_force(ip->i_mount, 0);
+
if (wait)
wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
}
-static inline void
+void
xfs_iunpin_wait(
xfs_inode_t *ip)
{
@@ -2675,7 +2679,7 @@ xfs_iflush_cluster(
xfs_buf_t *bp)
{
xfs_mount_t *mp = ip->i_mount;
- xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
+ struct xfs_perag *pag;
unsigned long first_index, mask;
unsigned long inodes_per_cluster;
int ilist_size;
@@ -2686,6 +2690,7 @@ xfs_iflush_cluster(
int bufwasdelwri;
int i;
+ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
ASSERT(pag->pagi_inodeok);
ASSERT(pag->pag_ici_init);
@@ -2693,7 +2698,7 @@ xfs_iflush_cluster(
ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
if (!ilist)
- return 0;
+ goto out_put;
mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
@@ -2762,6 +2767,8 @@ xfs_iflush_cluster(
out_free:
read_unlock(&pag->pag_ici_lock);
kmem_free(ilist);
+out_put:
+ xfs_perag_put(pag);
return 0;
@@ -2805,6 +2812,7 @@ cluster_corrupt_out:
*/
xfs_iflush_abort(iq);
kmem_free(ilist);
+ xfs_perag_put(pag);
return XFS_ERROR(EFSCORRUPTED);
}
@@ -2827,8 +2835,6 @@ xfs_iflush(
xfs_dinode_t *dip;
xfs_mount_t *mp;
int error;
- int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
- enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
XFS_STATS_INC(xs_iflush_count);
@@ -2841,19 +2847,6 @@ xfs_iflush(
mp = ip->i_mount;
/*
- * If the inode isn't dirty, then just release the inode flush lock and
- * do nothing. Treat stale inodes the same; we cannot rely on the
- * backing buffer remaining stale in cache for the remaining life of
- * the stale inode and so xfs_itobp() below may give us a buffer that
- * no longer contains inodes below. Doing this stale check here also
- * avoids forcing the log on pinned, stale inodes.
- */
- if (xfs_inode_clean(ip) || xfs_iflags_test(ip, XFS_ISTALE)) {
- xfs_ifunlock(ip);
- return 0;
- }
-
- /*
* We can't flush the inode until it is unpinned, so wait for it if we
* are allowed to block. We know no one new can pin it, because we are
* holding the inode lock shared and you need to hold it exclusively to
@@ -2864,7 +2857,7 @@ xfs_iflush(
* in the same cluster are dirty, they will probably write the inode
* out for us if they occur after the log force completes.
*/
- if (noblock && xfs_ipincount(ip)) {
+ if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
xfs_iunpin_nowait(ip);
xfs_ifunlock(ip);
return EAGAIN;
@@ -2872,6 +2865,19 @@ xfs_iflush(
xfs_iunpin_wait(ip);
/*
+ * For stale inodes we cannot rely on the backing buffer remaining
+ * stale in cache for the remaining life of the stale inode and so
+ * xfs_itobp() below may give us a buffer that no longer contains
+ * inodes below. We have to check this after ensuring the inode is
+ * unpinned so that it is safe to reclaim the stale inode after the
+ * flush call.
+ */
+ if (xfs_iflags_test(ip, XFS_ISTALE)) {
+ xfs_ifunlock(ip);
+ return 0;
+ }
+
+ /*
* This may have been unpinned because the filesystem is shutting
* down forcibly. If that's the case we must not write this inode
* to disk, because the log record didn't make it to disk!
@@ -2885,60 +2891,10 @@ xfs_iflush(
}
/*
- * Decide how buffer will be flushed out. This is done before
- * the call to xfs_iflush_int because this field is zeroed by it.
- */
- if (iip != NULL && iip->ili_format.ilf_fields != 0) {
- /*
- * Flush out the inode buffer according to the directions
- * of the caller. In the cases where the caller has given
- * us a choice choose the non-delwri case. This is because
- * the inode is in the AIL and we need to get it out soon.
- */
- switch (flags) {
- case XFS_IFLUSH_SYNC:
- case XFS_IFLUSH_DELWRI_ELSE_SYNC:
- flags = 0;
- break;
- case XFS_IFLUSH_ASYNC_NOBLOCK:
- case XFS_IFLUSH_ASYNC:
- case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
- flags = INT_ASYNC;
- break;
- case XFS_IFLUSH_DELWRI:
- flags = INT_DELWRI;
- break;
- default:
- ASSERT(0);
- flags = 0;
- break;
- }
- } else {
- switch (flags) {
- case XFS_IFLUSH_DELWRI_ELSE_SYNC:
- case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
- case XFS_IFLUSH_DELWRI:
- flags = INT_DELWRI;
- break;
- case XFS_IFLUSH_ASYNC_NOBLOCK:
- case XFS_IFLUSH_ASYNC:
- flags = INT_ASYNC;
- break;
- case XFS_IFLUSH_SYNC:
- flags = 0;
- break;
- default:
- ASSERT(0);
- flags = 0;
- break;
- }
- }
-
- /*
* Get the buffer containing the on-disk inode.
*/
error = xfs_itobp(mp, NULL, ip, &dip, &bp,
- noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
+ (flags & SYNC_WAIT) ? XBF_LOCK : XBF_TRYLOCK);
if (error || !bp) {
xfs_ifunlock(ip);
return error;
@@ -2956,7 +2912,7 @@ xfs_iflush(
* get stuck waiting in the write for too long.
*/
if (XFS_BUF_ISPINNED(bp))
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+ xfs_log_force(mp, 0);
/*
* inode clustering:
@@ -2966,13 +2922,10 @@ xfs_iflush(
if (error)
goto cluster_corrupt_out;
- if (flags & INT_DELWRI) {
- xfs_bdwrite(mp, bp);
- } else if (flags & INT_ASYNC) {
- error = xfs_bawrite(mp, bp);
- } else {
+ if (flags & SYNC_WAIT)
error = xfs_bwrite(mp, bp);
- }
+ else
+ xfs_bdwrite(mp, bp);
return error;
corrupt_out:
@@ -3007,16 +2960,6 @@ xfs_iflush_int(
iip = ip->i_itemp;
mp = ip->i_mount;
-
- /*
- * If the inode isn't dirty, then just release the inode
- * flush lock and do nothing.
- */
- if (xfs_inode_clean(ip)) {
- xfs_ifunlock(ip);
- return 0;
- }
-
/* set *dip = inode's place in the buffer */
dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index ec1f28c4fc4f..6c912b027596 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -420,16 +420,6 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
#define XFS_ILOCK_DEP(flags) (((flags) & XFS_ILOCK_DEP_MASK) >> XFS_ILOCK_SHIFT)
/*
- * Flags for xfs_iflush()
- */
-#define XFS_IFLUSH_DELWRI_ELSE_SYNC 1
-#define XFS_IFLUSH_DELWRI_ELSE_ASYNC 2
-#define XFS_IFLUSH_SYNC 3
-#define XFS_IFLUSH_ASYNC 4
-#define XFS_IFLUSH_DELWRI 5
-#define XFS_IFLUSH_ASYNC_NOBLOCK 6
-
-/*
* Flags for xfs_itruncate_start().
*/
#define XFS_ITRUNC_DEFINITE 0x1
@@ -483,6 +473,7 @@ int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
void xfs_iext_realloc(xfs_inode_t *, int, int);
void xfs_ipin(xfs_inode_t *);
void xfs_iunpin(xfs_inode_t *);
+void xfs_iunpin_wait(xfs_inode_t *);
int xfs_iflush(xfs_inode_t *, uint);
void xfs_ichgtime(xfs_inode_t *, int);
void xfs_lock_inodes(xfs_inode_t **, int, uint);
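With the XFS_IFLUSH_* values gone, xfs_iflush() callers select behaviour with a single flag. A hedged sketch of the new calling convention, following the xfs_inode.c hunk above and the xfs_inode_item.c hunk below:

	/* Removed: one of six distinct flush modes per call site */
	error = xfs_iflush(ip, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/* Now: SYNC_WAIT (or 0) decides blocking behaviour */
	error = xfs_iflush(ip, SYNC_WAIT);	/* wait for unpin, xfs_bwrite() the buffer */
	error = xfs_iflush(ip, 0);		/* non-blocking: EAGAIN if pinned, else xfs_bdwrite() */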
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index f38855d21ea5..d4dc063111f8 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -228,7 +228,7 @@ xfs_inode_item_format(
vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
vecp->i_len = sizeof(xfs_inode_log_format_t);
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IFORMAT);
+ vecp->i_type = XLOG_REG_TYPE_IFORMAT;
vecp++;
nvecs = 1;
@@ -279,7 +279,7 @@ xfs_inode_item_format(
vecp->i_addr = (xfs_caddr_t)&ip->i_d;
vecp->i_len = sizeof(struct xfs_icdinode);
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);
+ vecp->i_type = XLOG_REG_TYPE_ICORE;
vecp++;
nvecs++;
iip->ili_format.ilf_fields |= XFS_ILOG_CORE;
@@ -336,7 +336,7 @@ xfs_inode_item_format(
vecp->i_addr =
(char *)(ip->i_df.if_u1.if_extents);
vecp->i_len = ip->i_df.if_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
+ vecp->i_type = XLOG_REG_TYPE_IEXT;
} else
#endif
{
@@ -355,7 +355,7 @@ xfs_inode_item_format(
vecp->i_addr = (xfs_caddr_t)ext_buffer;
vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
XFS_DATA_FORK);
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IEXT);
+ vecp->i_type = XLOG_REG_TYPE_IEXT;
}
ASSERT(vecp->i_len <= ip->i_df.if_bytes);
iip->ili_format.ilf_dsize = vecp->i_len;
@@ -373,7 +373,7 @@ xfs_inode_item_format(
ASSERT(ip->i_df.if_broot != NULL);
vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
vecp->i_len = ip->i_df.if_broot_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IBROOT);
+ vecp->i_type = XLOG_REG_TYPE_IBROOT;
vecp++;
nvecs++;
iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
@@ -399,7 +399,7 @@ xfs_inode_item_format(
ASSERT((ip->i_df.if_real_bytes == 0) ||
(ip->i_df.if_real_bytes == data_bytes));
vecp->i_len = (int)data_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ILOCAL);
+ vecp->i_type = XLOG_REG_TYPE_ILOCAL;
vecp++;
nvecs++;
iip->ili_format.ilf_dsize = (unsigned)data_bytes;
@@ -477,7 +477,7 @@ xfs_inode_item_format(
vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
XFS_ATTR_FORK);
#endif
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_EXT);
+ vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
iip->ili_format.ilf_asize = vecp->i_len;
vecp++;
nvecs++;
@@ -492,7 +492,7 @@ xfs_inode_item_format(
ASSERT(ip->i_afp->if_broot != NULL);
vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
vecp->i_len = ip->i_afp->if_broot_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_BROOT);
+ vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
vecp++;
nvecs++;
iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
@@ -516,7 +516,7 @@ xfs_inode_item_format(
ASSERT((ip->i_afp->if_real_bytes == 0) ||
(ip->i_afp->if_real_bytes == data_bytes));
vecp->i_len = (int)data_bytes;
- XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_IATTR_LOCAL);
+ vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL;
vecp++;
nvecs++;
iip->ili_format.ilf_asize = (unsigned)data_bytes;
@@ -602,33 +602,20 @@ xfs_inode_item_trylock(
if (!xfs_iflock_nowait(ip)) {
/*
- * If someone else isn't already trying to push the inode
- * buffer, we get to do it.
+ * The inode has already been flushed to the backing buffer;
+ * leave it locked in shared mode and the pushbuf routine
+ * will unlock it.
*/
- if (iip->ili_pushbuf_flag == 0) {
- iip->ili_pushbuf_flag = 1;
-#ifdef DEBUG
- iip->ili_push_owner = current_pid();
-#endif
- /*
- * Inode is left locked in shared mode.
- * Pushbuf routine gets to unlock it.
- */
- return XFS_ITEM_PUSHBUF;
- } else {
- /*
- * We hold the AIL lock, so we must specify the
- * NONOTIFY flag so that we won't double trip.
- */
- xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
- return XFS_ITEM_FLUSHING;
- }
- /* NOTREACHED */
+ return XFS_ITEM_PUSHBUF;
}
/* Stale items should force out the iclog */
if (ip->i_flags & XFS_ISTALE) {
xfs_ifunlock(ip);
+ /*
+ * We hold the AIL lock; notify the unlock routine of this
+ * so it doesn't try to get the lock again.
+ */
xfs_iunlock(ip, XFS_ILOCK_SHARED|XFS_IUNLOCK_NONOTIFY);
return XFS_ITEM_PINNED;
}
@@ -746,11 +733,8 @@ xfs_inode_item_committed(
* This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
* failed to get the inode flush lock but did get the inode locked SHARED.
* Here we're trying to see if the inode buffer is incore, and if so whether it's
- * marked delayed write. If that's the case, we'll initiate a bawrite on that
- * buffer to expedite the process.
- *
- * We aren't holding the AIL lock (or the flush lock) when this gets called,
- * so it is inherently race-y.
+ * marked delayed write. If that's the case, we'll promote it and that will
+ * allow the caller to write the buffer by triggering the xfsbufd to run.
*/
STATIC void
xfs_inode_item_pushbuf(
@@ -759,82 +743,30 @@ xfs_inode_item_pushbuf(
xfs_inode_t *ip;
xfs_mount_t *mp;
xfs_buf_t *bp;
- uint dopush;
ip = iip->ili_inode;
-
ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
/*
- * The ili_pushbuf_flag keeps others from
- * trying to duplicate our effort.
- */
- ASSERT(iip->ili_pushbuf_flag != 0);
- ASSERT(iip->ili_push_owner == current_pid());
-
- /*
* If a flush is not in progress anymore, chances are that the
* inode was taken off the AIL. So, just get out.
*/
if (completion_done(&ip->i_flush) ||
((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0)) {
- iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return;
}
mp = ip->i_mount;
bp = xfs_incore(mp->m_ddev_targp, iip->ili_format.ilf_blkno,
- iip->ili_format.ilf_len, XFS_INCORE_TRYLOCK);
+ iip->ili_format.ilf_len, XBF_TRYLOCK);
- if (bp != NULL) {
- if (XFS_BUF_ISDELAYWRITE(bp)) {
- /*
- * We were racing with iflush because we don't hold
- * the AIL lock or the flush lock. However, at this point,
- * we have the buffer, and we know that it's dirty.
- * So, it's possible that iflush raced with us, and
- * this item is already taken off the AIL.
- * If not, we can flush it async.
- */
- dopush = ((iip->ili_item.li_flags & XFS_LI_IN_AIL) &&
- !completion_done(&ip->i_flush));
- iip->ili_pushbuf_flag = 0;
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- trace_xfs_inode_item_push(bp, _RET_IP_);
-
- if (XFS_BUF_ISPINNED(bp)) {
- xfs_log_force(mp, (xfs_lsn_t)0,
- XFS_LOG_FORCE);
- }
- if (dopush) {
- int error;
- error = xfs_bawrite(mp, bp);
- if (error)
- xfs_fs_cmn_err(CE_WARN, mp,
- "xfs_inode_item_pushbuf: pushbuf error %d on iip %p, bp %p",
- error, iip, bp);
- } else {
- xfs_buf_relse(bp);
- }
- } else {
- iip->ili_pushbuf_flag = 0;
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- xfs_buf_relse(bp);
- }
- return;
- }
- /*
- * We have to be careful about resetting pushbuf flag too early (above).
- * Even though in theory we can do it as soon as we have the buflock,
- * we don't want others to be doing work needlessly. They'll come to
- * this function thinking that pushing the buffer is their
- * responsibility only to find that the buffer is still locked by
- * another doing the same thing
- */
- iip->ili_pushbuf_flag = 0;
xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ if (!bp)
+ return;
+ if (XFS_BUF_ISDELAYWRITE(bp))
+ xfs_buf_delwri_promote(bp);
+ xfs_buf_relse(bp);
return;
}
@@ -867,10 +799,14 @@ xfs_inode_item_push(
iip->ili_format.ilf_fields != 0);
/*
- * Write out the inode. The completion routine ('iflush_done') will
- * pull it from the AIL, mark it clean, unlock the flush lock.
+ * Push the inode to its backing buffer. This will not remove the
+ * inode from the AIL - a further push will be required to trigger a
+ * buffer push. However, this allows all the dirty inodes to be pushed
+ * to the buffer before it is pushed to disk. The buffer IO completion
+ * will pull the inode from the AIL, mark it clean and unlock the flush
+ * lock.
*/
- (void) xfs_iflush(ip, XFS_IFLUSH_ASYNC);
+ (void) xfs_iflush(ip, 0);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
return;
@@ -934,7 +870,6 @@ xfs_inode_item_init(
/*
We have zeroed memory. No need ...
iip->ili_extents_buf = NULL;
- iip->ili_pushbuf_flag = 0;
*/
iip->ili_format.ilf_type = XFS_LI_INODE;
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index cc8df1ac7783..9a467958ecdd 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -144,12 +144,6 @@ typedef struct xfs_inode_log_item {
data exts */
struct xfs_bmbt_rec *ili_aextents_buf; /* array of logged
attr exts */
- unsigned int ili_pushbuf_flag; /* one bit used in push_ail */
-
-#ifdef DEBUG
- uint64_t ili_push_owner; /* one who sets pushbuf_flag
- above gets to push the buf */
-#endif
#ifdef XFS_TRANS_DEBUG
int ili_root_size;
char *ili_orig_root;
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 62efab2f3839..3af02314c605 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -408,8 +408,10 @@ xfs_bulkstat(
(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
nimask = ~(nicluster - 1);
nbcluster = nicluster >> mp->m_sb.sb_inopblog;
- irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4,
- KM_SLEEP | KM_MAYFAIL | KM_LARGE);
+ irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
+ if (!irbuf)
+ return ENOMEM;
+
nirbuf = irbsize / sizeof(*irbuf);
/*
@@ -420,9 +422,7 @@ xfs_bulkstat(
while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) {
cond_resched();
bp = NULL;
- down_read(&mp->m_peraglock);
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
- up_read(&mp->m_peraglock);
if (error) {
/*
* Skip this allocation group and go to the next one.
@@ -729,7 +729,7 @@ xfs_bulkstat(
/*
* Done, we're either out of filesystem or space to put the data.
*/
- kmem_free(irbuf);
+ kmem_free_large(irbuf);
*ubcountp = ubelem;
/*
* Found some inodes, return them now and return the error next time.
@@ -849,9 +849,7 @@ xfs_inumbers(
agbp = NULL;
while (left > 0 && agno < mp->m_sb.sb_agcount) {
if (agbp == NULL) {
- down_read(&mp->m_peraglock);
error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
- up_read(&mp->m_peraglock);
if (error) {
/*
* If we can't read the AGI of this ag,
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 600b5b06aaeb..4f16be4b6ee5 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -50,7 +50,6 @@ kmem_zone_t *xfs_log_ticket_zone;
(off) += (bytes);}
/* Local miscellaneous function prototypes */
-STATIC int xlog_bdstrat_cb(struct xfs_buf *);
STATIC int xlog_commit_record(xfs_mount_t *mp, xlog_ticket_t *ticket,
xlog_in_core_t **, xfs_lsn_t *);
STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
@@ -80,11 +79,6 @@ STATIC int xlog_state_release_iclog(xlog_t *log,
STATIC void xlog_state_switch_iclogs(xlog_t *log,
xlog_in_core_t *iclog,
int eventual_size);
-STATIC int xlog_state_sync(xlog_t *log,
- xfs_lsn_t lsn,
- uint flags,
- int *log_flushed);
-STATIC int xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed);
STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
/* local functions to manipulate grant head */
@@ -297,65 +291,6 @@ xfs_log_done(xfs_mount_t *mp,
return lsn;
} /* xfs_log_done */
-
-/*
- * Force the in-core log to disk. If flags == XFS_LOG_SYNC,
- * the force is done synchronously.
- *
- * Asynchronous forces are implemented by setting the WANT_SYNC
- * bit in the appropriate in-core log and then returning.
- *
- * Synchronous forces are implemented with a signal variable. All callers
- * to force a given lsn to disk will wait on a the sv attached to the
- * specific in-core log. When given in-core log finally completes its
- * write to disk, that thread will wake up all threads waiting on the
- * sv.
- */
-int
-_xfs_log_force(
- xfs_mount_t *mp,
- xfs_lsn_t lsn,
- uint flags,
- int *log_flushed)
-{
- xlog_t *log = mp->m_log;
- int dummy;
-
- if (!log_flushed)
- log_flushed = &dummy;
-
- ASSERT(flags & XFS_LOG_FORCE);
-
- XFS_STATS_INC(xs_log_force);
-
- if (log->l_flags & XLOG_IO_ERROR)
- return XFS_ERROR(EIO);
- if (lsn == 0)
- return xlog_state_sync_all(log, flags, log_flushed);
- else
- return xlog_state_sync(log, lsn, flags, log_flushed);
-} /* _xfs_log_force */
-
-/*
- * Wrapper for _xfs_log_force(), to be used when caller doesn't care
- * about errors or whether the log was flushed or not. This is the normal
- * interface to use when trying to unpin items or move the log forward.
- */
-void
-xfs_log_force(
- xfs_mount_t *mp,
- xfs_lsn_t lsn,
- uint flags)
-{
- int error;
- error = _xfs_log_force(mp, lsn, flags, NULL);
- if (error) {
- xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
- "error %d returned.", error);
- }
-}
-
-
/*
* Attaches a new iclog I/O completion callback routine during
* transaction commit. If the log is in error state, a non-zero
@@ -602,7 +537,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
if (mp->m_flags & XFS_MOUNT_RDONLY)
return 0;
- error = _xfs_log_force(mp, 0, XFS_LOG_FORCE|XFS_LOG_SYNC, NULL);
+ error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
#ifdef DEBUG
@@ -618,7 +553,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
if (! (XLOG_FORCED_SHUTDOWN(log))) {
reg[0].i_addr = (void*)&magic;
reg[0].i_len = sizeof(magic);
- XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_UNMOUNT);
+ reg[0].i_type = XLOG_REG_TYPE_UNMOUNT;
error = xfs_log_reserve(mp, 600, 1, &tic,
XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
@@ -988,35 +923,6 @@ xlog_iodone(xfs_buf_t *bp)
} /* xlog_iodone */
/*
- * The bdstrat callback function for log bufs. This gives us a central
- * place to trap bufs in case we get hit by a log I/O error and need to
- * shutdown. Actually, in practice, even when we didn't get a log error,
- * we transition the iclogs to IOERROR state *after* flushing all existing
- * iclogs to disk. This is because we don't want anymore new transactions to be
- * started or completed afterwards.
- */
-STATIC int
-xlog_bdstrat_cb(struct xfs_buf *bp)
-{
- xlog_in_core_t *iclog;
-
- iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
-
- if ((iclog->ic_state & XLOG_STATE_IOERROR) == 0) {
- /* note for irix bstrat will need struct bdevsw passed
- * Fix the following macro if the code ever is merged
- */
- XFS_bdstrat(bp);
- return 0;
- }
-
- XFS_BUF_ERROR(bp, EIO);
- XFS_BUF_STALE(bp);
- xfs_biodone(bp);
- return XFS_ERROR(EIO);
-}
-
-/*
* Return size of each in-core log record buffer.
*
* All machines get 8 x 32kB buffers by default, unless tuned otherwise.
@@ -1158,7 +1064,6 @@ xlog_alloc_log(xfs_mount_t *mp,
if (!bp)
goto out_free_log;
XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
- XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
ASSERT(XFS_BUF_ISBUSY(bp));
ASSERT(XFS_BUF_VALUSEMA(bp) <= 0);
@@ -1196,7 +1101,6 @@ xlog_alloc_log(xfs_mount_t *mp,
if (!XFS_BUF_CPSEMA(bp))
ASSERT(0);
XFS_BUF_SET_IODONE_FUNC(bp, xlog_iodone);
- XFS_BUF_SET_BDSTRAT_FUNC(bp, xlog_bdstrat_cb);
XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)1);
iclog->ic_bp = bp;
iclog->ic_data = bp->b_addr;
@@ -1268,7 +1172,7 @@ xlog_commit_record(xfs_mount_t *mp,
reg[0].i_addr = NULL;
reg[0].i_len = 0;
- XLOG_VEC_SET_TYPE(&reg[0], XLOG_REG_TYPE_COMMIT);
+ reg[0].i_type = XLOG_REG_TYPE_COMMIT;
ASSERT_ALWAYS(iclog);
if ((error = xlog_write(mp, reg, 1, ticket, commitlsnp,
@@ -1343,6 +1247,37 @@ xlog_grant_push_ail(xfs_mount_t *mp,
xfs_trans_ail_push(log->l_ailp, threshold_lsn);
} /* xlog_grant_push_ail */
+/*
+ * The bdstrat callback function for log bufs. This gives us a central
+ * place to trap bufs in case we get hit by a log I/O error and need to
+ * shutdown. Actually, in practice, even when we didn't get a log error,
+ * we transition the iclogs to IOERROR state *after* flushing all existing
+ * iclogs to disk. This is because we don't want anymore new transactions to be
+ * started or completed afterwards.
+ */
+STATIC int
+xlog_bdstrat(
+ struct xfs_buf *bp)
+{
+ struct xlog_in_core *iclog;
+
+ iclog = XFS_BUF_FSPRIVATE(bp, xlog_in_core_t *);
+ if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ XFS_BUF_ERROR(bp, EIO);
+ XFS_BUF_STALE(bp);
+ xfs_biodone(bp);
+ /*
+ * It would seem logical to return EIO here, but we rely on
+ * the log state machine to propagate I/O errors instead of
+ * doing it here.
+ */
+ return 0;
+ }
+
+ bp->b_flags |= _XBF_RUN_QUEUES;
+ xfs_buf_iorequest(bp);
+ return 0;
+}
/*
* Flush out the in-core log (iclog) to the on-disk log in an asynchronous
@@ -1462,7 +1397,7 @@ xlog_sync(xlog_t *log,
*/
XFS_BUF_WRITE(bp);
- if ((error = XFS_bwrite(bp))) {
+ if ((error = xlog_bdstrat(bp))) {
xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
XFS_BUF_ADDR(bp));
return error;
@@ -1502,7 +1437,7 @@ xlog_sync(xlog_t *log,
/* account for internal log which doesn't start at block #0 */
XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
XFS_BUF_WRITE(bp);
- if ((error = XFS_bwrite(bp))) {
+ if ((error = xlog_bdstrat(bp))) {
xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
bp, XFS_BUF_ADDR(bp));
return error;
@@ -2854,7 +2789,6 @@ xlog_state_switch_iclogs(xlog_t *log,
log->l_iclog = iclog->ic_next;
} /* xlog_state_switch_iclogs */
-
/*
* Write out all data in the in-core log as of this exact moment in time.
*
@@ -2882,11 +2816,17 @@ xlog_state_switch_iclogs(xlog_t *log,
* b) when we return from flushing out this iclog, it is still
* not in the active nor dirty state.
*/
-STATIC int
-xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
+int
+_xfs_log_force(
+ struct xfs_mount *mp,
+ uint flags,
+ int *log_flushed)
{
- xlog_in_core_t *iclog;
- xfs_lsn_t lsn;
+ struct log *log = mp->m_log;
+ struct xlog_in_core *iclog;
+ xfs_lsn_t lsn;
+
+ XFS_STATS_INC(xs_log_force);
spin_lock(&log->l_icloglock);
@@ -2932,7 +2872,9 @@ xlog_state_sync_all(xlog_t *log, uint flags, int *log_flushed)
if (xlog_state_release_iclog(log, iclog))
return XFS_ERROR(EIO);
- *log_flushed = 1;
+
+ if (log_flushed)
+ *log_flushed = 1;
spin_lock(&log->l_icloglock);
if (be64_to_cpu(iclog->ic_header.h_lsn) == lsn &&
iclog->ic_state != XLOG_STATE_DIRTY)
@@ -2976,19 +2918,37 @@ maybe_sleep:
*/
if (iclog->ic_state & XLOG_STATE_IOERROR)
return XFS_ERROR(EIO);
- *log_flushed = 1;
-
+ if (log_flushed)
+ *log_flushed = 1;
} else {
no_sleep:
spin_unlock(&log->l_icloglock);
}
return 0;
-} /* xlog_state_sync_all */
+}
+/*
+ * Wrapper for _xfs_log_force(), to be used when caller doesn't care
+ * about errors or whether the log was flushed or not. This is the normal
+ * interface to use when trying to unpin items or move the log forward.
+ */
+void
+xfs_log_force(
+ xfs_mount_t *mp,
+ uint flags)
+{
+ int error;
+
+ error = _xfs_log_force(mp, flags, NULL);
+ if (error) {
+ xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force: "
+ "error %d returned.", error);
+ }
+}
/*
- * Used by code which implements synchronous log forces.
+ * Force the in-core log to disk for a specific LSN.
*
* Find in-core log with lsn.
* If it is in the DIRTY state, just return.
@@ -2996,109 +2956,142 @@ no_sleep:
* state and go to sleep or return.
* If it is in any other state, go to sleep or return.
*
- * If filesystem activity goes to zero, the iclog will get flushed only by
- * bdflush().
+ * Synchronous forces are implemented with a signal variable. All callers
+ * to force a given lsn to disk will wait on the sv attached to the
+ * specific in-core log. When the given in-core log finally completes its
+ * write to disk, that thread will wake up all threads waiting on the
+ * sv.
*/
-STATIC int
-xlog_state_sync(xlog_t *log,
- xfs_lsn_t lsn,
- uint flags,
- int *log_flushed)
+int
+_xfs_log_force_lsn(
+ struct xfs_mount *mp,
+ xfs_lsn_t lsn,
+ uint flags,
+ int *log_flushed)
{
- xlog_in_core_t *iclog;
- int already_slept = 0;
+ struct log *log = mp->m_log;
+ struct xlog_in_core *iclog;
+ int already_slept = 0;
-try_again:
- spin_lock(&log->l_icloglock);
- iclog = log->l_iclog;
+ ASSERT(lsn != 0);
- if (iclog->ic_state & XLOG_STATE_IOERROR) {
- spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
- }
-
- do {
- if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
- iclog = iclog->ic_next;
- continue;
- }
+ XFS_STATS_INC(xs_log_force);
- if (iclog->ic_state == XLOG_STATE_DIRTY) {
+try_again:
+ spin_lock(&log->l_icloglock);
+ iclog = log->l_iclog;
+ if (iclog->ic_state & XLOG_STATE_IOERROR) {
spin_unlock(&log->l_icloglock);
- return 0;
+ return XFS_ERROR(EIO);
}
- if (iclog->ic_state == XLOG_STATE_ACTIVE) {
- /*
- * We sleep here if we haven't already slept (e.g.
- * this is the first time we've looked at the correct
- * iclog buf) and the buffer before us is going to
- * be sync'ed. The reason for this is that if we
- * are doing sync transactions here, by waiting for
- * the previous I/O to complete, we can allow a few
- * more transactions into this iclog before we close
- * it down.
- *
- * Otherwise, we mark the buffer WANT_SYNC, and bump
- * up the refcnt so we can release the log (which drops
- * the ref count). The state switch keeps new transaction
- * commits from using this buffer. When the current commits
- * finish writing into the buffer, the refcount will drop to
- * zero and the buffer will go out then.
- */
- if (!already_slept &&
- (iclog->ic_prev->ic_state & (XLOG_STATE_WANT_SYNC |
- XLOG_STATE_SYNCING))) {
- ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
- XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_prev->ic_write_wait, PSWP,
- &log->l_icloglock, s);
- *log_flushed = 1;
- already_slept = 1;
- goto try_again;
- } else {
+ do {
+ if (be64_to_cpu(iclog->ic_header.h_lsn) != lsn) {
+ iclog = iclog->ic_next;
+ continue;
+ }
+
+ if (iclog->ic_state == XLOG_STATE_DIRTY) {
+ spin_unlock(&log->l_icloglock);
+ return 0;
+ }
+
+ if (iclog->ic_state == XLOG_STATE_ACTIVE) {
+ /*
+ * We sleep here if we haven't already slept (e.g.
+ * this is the first time we've looked at the correct
+ * iclog buf) and the buffer before us is going to
+ * be sync'ed. The reason for this is that if we
+ * are doing sync transactions here, by waiting for
+ * the previous I/O to complete, we can allow a few
+ * more transactions into this iclog before we close
+ * it down.
+ *
+ * Otherwise, we mark the buffer WANT_SYNC, and bump
+ * up the refcnt so we can release the log (which
+ * drops the ref count). The state switch keeps new
+ * transaction commits from using this buffer. When
+ * the current commits finish writing into the buffer,
+ * the refcount will drop to zero and the buffer will
+ * go out then.
+ */
+ if (!already_slept &&
+ (iclog->ic_prev->ic_state &
+ (XLOG_STATE_WANT_SYNC | XLOG_STATE_SYNCING))) {
+ ASSERT(!(iclog->ic_state & XLOG_STATE_IOERROR));
+
+ XFS_STATS_INC(xs_log_force_sleep);
+
+ sv_wait(&iclog->ic_prev->ic_write_wait,
+ PSWP, &log->l_icloglock, s);
+ if (log_flushed)
+ *log_flushed = 1;
+ already_slept = 1;
+ goto try_again;
+ }
atomic_inc(&iclog->ic_refcnt);
xlog_state_switch_iclogs(log, iclog, 0);
spin_unlock(&log->l_icloglock);
if (xlog_state_release_iclog(log, iclog))
return XFS_ERROR(EIO);
- *log_flushed = 1;
+ if (log_flushed)
+ *log_flushed = 1;
spin_lock(&log->l_icloglock);
}
- }
- if ((flags & XFS_LOG_SYNC) && /* sleep */
- !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
+ if ((flags & XFS_LOG_SYNC) && /* sleep */
+ !(iclog->ic_state &
+ (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
+ /*
+ * Don't wait on completion if we know that we've
+ * gotten a log write error.
+ */
+ if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ spin_unlock(&log->l_icloglock);
+ return XFS_ERROR(EIO);
+ }
+ XFS_STATS_INC(xs_log_force_sleep);
+ sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
+ /*
+ * No need to grab the log lock here since we're
+ * only deciding whether or not to return EIO
+ * and the memory read should be atomic.
+ */
+ if (iclog->ic_state & XLOG_STATE_IOERROR)
+ return XFS_ERROR(EIO);
- /*
- * Don't wait on completion if we know that we've
- * gotten a log write error.
- */
- if (iclog->ic_state & XLOG_STATE_IOERROR) {
+ if (log_flushed)
+ *log_flushed = 1;
+ } else { /* just return */
spin_unlock(&log->l_icloglock);
- return XFS_ERROR(EIO);
}
- XFS_STATS_INC(xs_log_force_sleep);
- sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
- /*
- * No need to grab the log lock here since we're
- * only deciding whether or not to return EIO
- * and the memory read should be atomic.
- */
- if (iclog->ic_state & XLOG_STATE_IOERROR)
- return XFS_ERROR(EIO);
- *log_flushed = 1;
- } else { /* just return */
- spin_unlock(&log->l_icloglock);
- }
- return 0;
- } while (iclog != log->l_iclog);
+ return 0;
+ } while (iclog != log->l_iclog);
- spin_unlock(&log->l_icloglock);
- return 0;
-} /* xlog_state_sync */
+ spin_unlock(&log->l_icloglock);
+ return 0;
+}
+/*
+ * Wrapper for _xfs_log_force_lsn(), to be used when caller doesn't care
+ * about errors or whether the log was flushed or not. This is the normal
+ * interface to use when trying to unpin items or move the log forward.
+ */
+void
+xfs_log_force_lsn(
+ xfs_mount_t *mp,
+ xfs_lsn_t lsn,
+ uint flags)
+{
+ int error;
+
+ error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
+ if (error) {
+ xfs_fs_cmn_err(CE_WARN, mp, "xfs_log_force_lsn: "
+ "error %d returned.", error);
+ }
+}
/*
* Called when we want to mark the current iclog as being ready to sync to
@@ -3463,7 +3456,6 @@ xfs_log_force_umount(
xlog_ticket_t *tic;
xlog_t *log;
int retval;
- int dummy;
log = mp->m_log;
@@ -3537,13 +3529,14 @@ xfs_log_force_umount(
}
spin_unlock(&log->l_grant_lock);
- if (! (log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
+ if (!(log->l_iclog->ic_state & XLOG_STATE_IOERROR)) {
ASSERT(!logerror);
/*
* Force the incore logs to disk before shutting the
* log down completely.
*/
- xlog_state_sync_all(log, XFS_LOG_FORCE|XFS_LOG_SYNC, &dummy);
+ _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
+
spin_lock(&log->l_icloglock);
retval = xlog_state_ioerror(log);
spin_unlock(&log->l_icloglock);
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index d0c9baa50b1a..7074be9d13e9 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -70,14 +70,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
* Flags to xfs_log_force()
*
* XFS_LOG_SYNC: Synchronous force in-core log to disk
- * XFS_LOG_FORCE: Start in-core log write now.
- * XFS_LOG_URGE: Start write within some window of time.
- *
- * Note: Either XFS_LOG_FORCE or XFS_LOG_URGE must be set.
*/
#define XFS_LOG_SYNC 0x1
-#define XFS_LOG_FORCE 0x2
-#define XFS_LOG_URGE 0x4
#endif /* __KERNEL__ */
@@ -110,10 +104,8 @@ static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
#define XLOG_REG_TYPE_TRANSHDR 19
#define XLOG_REG_TYPE_MAX 19
-#define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
-
typedef struct xfs_log_iovec {
- xfs_caddr_t i_addr; /* beginning address of region */
+ xfs_caddr_t i_addr; /* beginning address of region */
int i_len; /* length in bytes of region */
uint i_type; /* type of region */
} xfs_log_iovec_t;
@@ -140,12 +132,17 @@ xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
void **iclog,
uint flags);
int _xfs_log_force(struct xfs_mount *mp,
- xfs_lsn_t lsn,
uint flags,
int *log_forced);
void xfs_log_force(struct xfs_mount *mp,
- xfs_lsn_t lsn,
uint flags);
+int _xfs_log_force_lsn(struct xfs_mount *mp,
+ xfs_lsn_t lsn,
+ uint flags,
+ int *log_forced);
+void xfs_log_force_lsn(struct xfs_mount *mp,
+ xfs_lsn_t lsn,
+ uint flags);
int xfs_log_mount(struct xfs_mount *mp,
struct xfs_buftarg *log_target,
xfs_daddr_t start_block,
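For callers, the old LSN-plus-flags interface splits into a whole-log force and an LSN-targeted force. A hedged mapping of the conversions seen elsewhere in this patch (the final synchronous LSN form is an assumption, not shown in the hunks above):

	/* Old interface (removed) */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);			/* push whole log, async */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);	/* push whole log, wait */
	xfs_log_force(mp, lsn, XFS_LOG_FORCE);				/* push up to lsn, async */

	/* New interface */
	xfs_log_force(mp, 0);				/* push whole log, async */
	xfs_log_force(mp, XFS_LOG_SYNC);		/* push whole log, wait */
	xfs_log_force_lsn(mp, lsn, 0);			/* push up to lsn, async */
	xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC);	/* push up to lsn, wait (assumed) */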
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index d55662db7077..fd02a18facd5 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -443,14 +443,9 @@ typedef struct log {
/* common routines */
extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
-extern int xlog_find_tail(xlog_t *log,
- xfs_daddr_t *head_blk,
- xfs_daddr_t *tail_blk);
extern int xlog_recover(xlog_t *log);
extern int xlog_recover_finish(xlog_t *log);
extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
-extern struct xfs_buf *xlog_get_bp(xlog_t *, int);
-extern void xlog_put_bp(struct xfs_buf *);
extern kmem_zone_t *xfs_log_ticket_zone;
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 69ac2e5ef20c..22e6efdc17ea 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -50,8 +50,6 @@
STATIC int xlog_find_zeroed(xlog_t *, xfs_daddr_t *);
STATIC int xlog_clear_stale_blocks(xlog_t *, xfs_lsn_t);
-STATIC void xlog_recover_insert_item_backq(xlog_recover_item_t **q,
- xlog_recover_item_t *item);
#if defined(DEBUG)
STATIC void xlog_recover_check_summary(xlog_t *);
#else
@@ -68,7 +66,7 @@ STATIC void xlog_recover_check_summary(xlog_t *);
((bbs + (log)->l_sectbb_mask + 1) & ~(log)->l_sectbb_mask) : (bbs) )
#define XLOG_SECTOR_ROUNDDOWN_BLKNO(log, bno) ((bno) & ~(log)->l_sectbb_mask)
-xfs_buf_t *
+STATIC xfs_buf_t *
xlog_get_bp(
xlog_t *log,
int nbblks)
@@ -88,7 +86,7 @@ xlog_get_bp(
return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
}
-void
+STATIC void
xlog_put_bp(
xfs_buf_t *bp)
{
@@ -805,7 +803,7 @@ xlog_find_head(
* We could speed up search by using current head_blk buffer, but it is not
* available.
*/
-int
+STATIC int
xlog_find_tail(
xlog_t *log,
xfs_daddr_t *head_blk,
@@ -1367,36 +1365,45 @@ xlog_clear_stale_blocks(
STATIC xlog_recover_t *
xlog_recover_find_tid(
- xlog_recover_t *q,
+ struct hlist_head *head,
xlog_tid_t tid)
{
- xlog_recover_t *p = q;
+ xlog_recover_t *trans;
+ struct hlist_node *n;
- while (p != NULL) {
- if (p->r_log_tid == tid)
- break;
- p = p->r_next;
+ hlist_for_each_entry(trans, n, head, r_list) {
+ if (trans->r_log_tid == tid)
+ return trans;
}
- return p;
+ return NULL;
}
STATIC void
-xlog_recover_put_hashq(
- xlog_recover_t **q,
- xlog_recover_t *trans)
+xlog_recover_new_tid(
+ struct hlist_head *head,
+ xlog_tid_t tid,
+ xfs_lsn_t lsn)
{
- trans->r_next = *q;
- *q = trans;
+ xlog_recover_t *trans;
+
+ trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
+ trans->r_log_tid = tid;
+ trans->r_lsn = lsn;
+ INIT_LIST_HEAD(&trans->r_itemq);
+
+ INIT_HLIST_NODE(&trans->r_list);
+ hlist_add_head(&trans->r_list, head);
}
STATIC void
xlog_recover_add_item(
- xlog_recover_item_t **itemq)
+ struct list_head *head)
{
xlog_recover_item_t *item;
item = kmem_zalloc(sizeof(xlog_recover_item_t), KM_SLEEP);
- xlog_recover_insert_item_backq(itemq, item);
+ INIT_LIST_HEAD(&item->ri_list);
+ list_add_tail(&item->ri_list, head);
}
STATIC int
@@ -1409,8 +1416,7 @@ xlog_recover_add_to_cont_trans(
xfs_caddr_t ptr, old_ptr;
int old_len;
- item = trans->r_itemq;
- if (item == NULL) {
+ if (list_empty(&trans->r_itemq)) {
/* finish copying rest of trans header */
xlog_recover_add_item(&trans->r_itemq);
ptr = (xfs_caddr_t) &trans->r_theader +
@@ -1418,7 +1424,8 @@ xlog_recover_add_to_cont_trans(
memcpy(ptr, dp, len); /* d, s, l */
return 0;
}
- item = item->ri_prev;
+ /* take the tail entry */
+ item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
old_ptr = item->ri_buf[item->ri_cnt-1].i_addr;
old_len = item->ri_buf[item->ri_cnt-1].i_len;
@@ -1455,8 +1462,7 @@ xlog_recover_add_to_trans(
if (!len)
return 0;
- item = trans->r_itemq;
- if (item == NULL) {
+ if (list_empty(&trans->r_itemq)) {
/* we need to catch log corruptions here */
if (*(uint *)dp != XFS_TRANS_HEADER_MAGIC) {
xlog_warn("XFS: xlog_recover_add_to_trans: "
@@ -1474,12 +1480,15 @@ xlog_recover_add_to_trans(
memcpy(ptr, dp, len);
in_f = (xfs_inode_log_format_t *)ptr;
- if (item->ri_prev->ri_total != 0 &&
- item->ri_prev->ri_total == item->ri_prev->ri_cnt) {
+ /* take the tail entry */
+ item = list_entry(trans->r_itemq.prev, xlog_recover_item_t, ri_list);
+ if (item->ri_total != 0 &&
+ item->ri_total == item->ri_cnt) {
+ /* tail item is in use, get a new one */
xlog_recover_add_item(&trans->r_itemq);
+ item = list_entry(trans->r_itemq.prev,
+ xlog_recover_item_t, ri_list);
}
- item = trans->r_itemq;
- item = item->ri_prev;
if (item->ri_total == 0) { /* first region to be added */
if (in_f->ilf_size == 0 ||
@@ -1504,96 +1513,29 @@ xlog_recover_add_to_trans(
return 0;
}
-STATIC void
-xlog_recover_new_tid(
- xlog_recover_t **q,
- xlog_tid_t tid,
- xfs_lsn_t lsn)
-{
- xlog_recover_t *trans;
-
- trans = kmem_zalloc(sizeof(xlog_recover_t), KM_SLEEP);
- trans->r_log_tid = tid;
- trans->r_lsn = lsn;
- xlog_recover_put_hashq(q, trans);
-}
-
-STATIC int
-xlog_recover_unlink_tid(
- xlog_recover_t **q,
- xlog_recover_t *trans)
-{
- xlog_recover_t *tp;
- int found = 0;
-
- ASSERT(trans != NULL);
- if (trans == *q) {
- *q = (*q)->r_next;
- } else {
- tp = *q;
- while (tp) {
- if (tp->r_next == trans) {
- found = 1;
- break;
- }
- tp = tp->r_next;
- }
- if (!found) {
- xlog_warn(
- "XFS: xlog_recover_unlink_tid: trans not found");
- ASSERT(0);
- return XFS_ERROR(EIO);
- }
- tp->r_next = tp->r_next->r_next;
- }
- return 0;
-}
-
-STATIC void
-xlog_recover_insert_item_backq(
- xlog_recover_item_t **q,
- xlog_recover_item_t *item)
-{
- if (*q == NULL) {
- item->ri_prev = item->ri_next = item;
- *q = item;
- } else {
- item->ri_next = *q;
- item->ri_prev = (*q)->ri_prev;
- (*q)->ri_prev = item;
- item->ri_prev->ri_next = item;
- }
-}
-
-STATIC void
-xlog_recover_insert_item_frontq(
- xlog_recover_item_t **q,
- xlog_recover_item_t *item)
-{
- xlog_recover_insert_item_backq(q, item);
- *q = item;
-}
-
+/*
+ * Sort the log items in the transaction. Cancelled buffers need
+ * to be put first so they are processed before any items that might
+ * modify the buffers. If they are cancelled, then the modifications
+ * don't need to be replayed.
+ */
STATIC int
xlog_recover_reorder_trans(
xlog_recover_t *trans)
{
- xlog_recover_item_t *first_item, *itemq, *itemq_next;
- xfs_buf_log_format_t *buf_f;
- ushort flags = 0;
+ xlog_recover_item_t *item, *n;
+ LIST_HEAD(sort_list);
- first_item = itemq = trans->r_itemq;
- trans->r_itemq = NULL;
- do {
- itemq_next = itemq->ri_next;
- buf_f = (xfs_buf_log_format_t *)itemq->ri_buf[0].i_addr;
+ list_splice_init(&trans->r_itemq, &sort_list);
+ list_for_each_entry_safe(item, n, &sort_list, ri_list) {
+ xfs_buf_log_format_t *buf_f;
- switch (ITEM_TYPE(itemq)) {
+ buf_f = (xfs_buf_log_format_t *)item->ri_buf[0].i_addr;
+
+ switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
- flags = buf_f->blf_flags;
- if (!(flags & XFS_BLI_CANCEL)) {
- xlog_recover_insert_item_frontq(&trans->r_itemq,
- itemq);
+ if (!(buf_f->blf_flags & XFS_BLI_CANCEL)) {
+ list_move(&item->ri_list, &trans->r_itemq);
break;
}
case XFS_LI_INODE:
@@ -1601,7 +1543,7 @@ xlog_recover_reorder_trans(
case XFS_LI_QUOTAOFF:
case XFS_LI_EFD:
case XFS_LI_EFI:
- xlog_recover_insert_item_backq(&trans->r_itemq, itemq);
+ list_move_tail(&item->ri_list, &trans->r_itemq);
break;
default:
xlog_warn(
@@ -1609,8 +1551,8 @@ xlog_recover_reorder_trans(
ASSERT(0);
return XFS_ERROR(EIO);
}
- itemq = itemq_next;
- } while (first_item != itemq);
+ }
+ ASSERT(list_empty(&sort_list));
return 0;
}
@@ -2242,9 +2184,9 @@ xlog_recover_do_buffer_trans(
}
mp = log->l_mp;
- buf_flags = XFS_BUF_LOCK;
+ buf_flags = XBF_LOCK;
if (!(flags & XFS_BLI_INODE_BUF))
- buf_flags |= XFS_BUF_MAPPED;
+ buf_flags |= XBF_MAPPED;
bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, buf_flags);
if (XFS_BUF_ISERROR(bp)) {
@@ -2346,7 +2288,7 @@ xlog_recover_do_inode_trans(
}
bp = xfs_buf_read(mp->m_ddev_targp, in_f->ilf_blkno, in_f->ilf_len,
- XFS_BUF_LOCK);
+ XBF_LOCK);
if (XFS_BUF_ISERROR(bp)) {
xfs_ioerror_alert("xlog_recover_do..(read#2)", mp,
bp, in_f->ilf_blkno);
@@ -2814,14 +2756,13 @@ xlog_recover_do_trans(
int pass)
{
int error = 0;
- xlog_recover_item_t *item, *first_item;
+ xlog_recover_item_t *item;
error = xlog_recover_reorder_trans(trans);
if (error)
return error;
- first_item = item = trans->r_itemq;
- do {
+ list_for_each_entry(item, &trans->r_itemq, ri_list) {
switch (ITEM_TYPE(item)) {
case XFS_LI_BUF:
error = xlog_recover_do_buffer_trans(log, item, pass);
@@ -2854,8 +2795,7 @@ xlog_recover_do_trans(
if (error)
return error;
- item = item->ri_next;
- } while (first_item != item);
+ }
return 0;
}
@@ -2869,21 +2809,18 @@ STATIC void
xlog_recover_free_trans(
xlog_recover_t *trans)
{
- xlog_recover_item_t *first_item, *item, *free_item;
+ xlog_recover_item_t *item, *n;
int i;
- item = first_item = trans->r_itemq;
- do {
- free_item = item;
- item = item->ri_next;
- /* Free the regions in the item. */
- for (i = 0; i < free_item->ri_cnt; i++) {
- kmem_free(free_item->ri_buf[i].i_addr);
- }
+ list_for_each_entry_safe(item, n, &trans->r_itemq, ri_list) {
+ /* Free the regions in the item. */
+ list_del(&item->ri_list);
+ for (i = 0; i < item->ri_cnt; i++)
+ kmem_free(item->ri_buf[i].i_addr);
/* Free the item itself */
- kmem_free(free_item->ri_buf);
- kmem_free(free_item);
- } while (first_item != item);
+ kmem_free(item->ri_buf);
+ kmem_free(item);
+ }
/* Free the transaction recover structure */
kmem_free(trans);
}
@@ -2891,14 +2828,12 @@ xlog_recover_free_trans(
STATIC int
xlog_recover_commit_trans(
xlog_t *log,
- xlog_recover_t **q,
xlog_recover_t *trans,
int pass)
{
int error;
- if ((error = xlog_recover_unlink_tid(q, trans)))
- return error;
+ hlist_del(&trans->r_list);
if ((error = xlog_recover_do_trans(log, trans, pass)))
return error;
xlog_recover_free_trans(trans); /* no error */
@@ -2926,7 +2861,7 @@ xlog_recover_unmount_trans(
STATIC int
xlog_recover_process_data(
xlog_t *log,
- xlog_recover_t *rhash[],
+ struct hlist_head rhash[],
xlog_rec_header_t *rhead,
xfs_caddr_t dp,
int pass)
@@ -2960,7 +2895,7 @@ xlog_recover_process_data(
}
tid = be32_to_cpu(ohead->oh_tid);
hash = XLOG_RHASH(tid);
- trans = xlog_recover_find_tid(rhash[hash], tid);
+ trans = xlog_recover_find_tid(&rhash[hash], tid);
if (trans == NULL) { /* not found; add new tid */
if (ohead->oh_flags & XLOG_START_TRANS)
xlog_recover_new_tid(&rhash[hash], tid,
@@ -2978,7 +2913,7 @@ xlog_recover_process_data(
switch (flags) {
case XLOG_COMMIT_TRANS:
error = xlog_recover_commit_trans(log,
- &rhash[hash], trans, pass);
+ trans, pass);
break;
case XLOG_UNMOUNT_TRANS:
error = xlog_recover_unmount_trans(trans);
@@ -3211,7 +3146,7 @@ xlog_recover_process_one_iunlink(
/*
* Get the on disk inode to find the next inode in the bucket.
*/
- error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XFS_BUF_LOCK);
+ error = xfs_itobp(mp, NULL, ip, &dip, &ibp, XBF_LOCK);
if (error)
goto fail_iput;
@@ -3517,7 +3452,7 @@ xlog_do_recovery_pass(
int error = 0, h_size;
int bblks, split_bblks;
int hblks, split_hblks, wrapped_hblks;
- xlog_recover_t *rhash[XLOG_RHASH_SIZE];
+ struct hlist_head rhash[XLOG_RHASH_SIZE];
ASSERT(head_blk != tail_blk);
@@ -3978,8 +3913,7 @@ xlog_recover_finish(
* case the unlink transactions would have problems
* pushing the EFIs out of the way.
*/
- xfs_log_force(log->l_mp, (xfs_lsn_t)0,
- (XFS_LOG_FORCE | XFS_LOG_SYNC));
+ xfs_log_force(log->l_mp, XFS_LOG_SYNC);
xlog_recover_process_iunlinks(log);
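
The xfs_log_recover.c hunks above replace the hand-rolled ri_next/ri_prev ring with the kernel's generic struct list_head, so recovery items are walked with list_for_each_entry() and freed with the _safe variant. Below is a minimal sketch of that iterate-and-delete pattern; it is not taken from the patch, assumes a kernel build environment, and demo_item/demo_free_all are hypothetical names used only here.

/* Sketch: same iterate-and-free pattern as the new xlog_recover_free_trans() */
#include <linux/list.h>
#include <linux/slab.h>

struct demo_item {
	struct list_head	list;		/* linkage, like ri_list above */
	void			*payload;
};

static void demo_free_all(struct list_head *queue)
{
	struct demo_item	*item, *n;

	/* the _safe variant caches the next node so the current one can be freed */
	list_for_each_entry_safe(item, n, queue, list) {
		list_del(&item->list);
		kfree(item->payload);
		kfree(item);
	}
}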
diff --git a/fs/xfs/xfs_log_recover.h b/fs/xfs/xfs_log_recover.h
index b22545555301..75d749207258 100644
--- a/fs/xfs/xfs_log_recover.h
+++ b/fs/xfs/xfs_log_recover.h
@@ -35,22 +35,21 @@
* item headers are in ri_buf[0]. Additional buffers follow.
*/
typedef struct xlog_recover_item {
- struct xlog_recover_item *ri_next;
- struct xlog_recover_item *ri_prev;
- int ri_type;
- int ri_cnt; /* count of regions found */
- int ri_total; /* total regions */
- xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */
+ struct list_head ri_list;
+ int ri_type;
+ int ri_cnt; /* count of regions found */
+ int ri_total; /* total regions */
+ xfs_log_iovec_t *ri_buf; /* ptr to regions buffer */
} xlog_recover_item_t;
struct xlog_tid;
typedef struct xlog_recover {
- struct xlog_recover *r_next;
- xlog_tid_t r_log_tid; /* log's transaction id */
- xfs_trans_header_t r_theader; /* trans header for partial */
- int r_state; /* not needed */
- xfs_lsn_t r_lsn; /* xact lsn */
- xlog_recover_item_t *r_itemq; /* q for items */
+ struct hlist_node r_list;
+ xlog_tid_t r_log_tid; /* log's transaction id */
+ xfs_trans_header_t r_theader; /* trans header for partial */
+ int r_state; /* not needed */
+ xfs_lsn_t r_lsn; /* xact lsn */
+ struct list_head r_itemq; /* q for items */
} xlog_recover_t;
#define ITEM_TYPE(i) (*(ushort *)(i)->ri_buf[0].i_addr)
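
The header change above swaps the open-coded r_next chain and the array of xlog_recover pointers for struct hlist_node / struct hlist_head, the kernel's single-pointer hash-bucket list. A rough sketch of the bucket add/lookup usage this enables follows; it is not from the patch, demo_trans/demo_hash/demo_add/demo_find are hypothetical, and the four-argument hlist_for_each_entry() form matches kernels of this vintage.

/* Sketch: hash buckets built from hlist_head, as the rhash[] array now is */
#include <linux/list.h>

#define DEMO_HASH_SIZE	16

struct demo_trans {
	struct hlist_node	node;		/* like r_list above */
	unsigned int		tid;
};

static struct hlist_head demo_hash[DEMO_HASH_SIZE];

static void demo_add(struct demo_trans *t)
{
	hlist_add_head(&t->node, &demo_hash[t->tid % DEMO_HASH_SIZE]);
}

static struct demo_trans *demo_find(unsigned int tid)
{
	struct demo_trans	*t;
	struct hlist_node	*pos;

	hlist_for_each_entry(t, pos, &demo_hash[tid % DEMO_HASH_SIZE], node)
		if (t->tid == tid)
			return t;
	return NULL;
}

/* removal is a single hlist_del(&t->node), as in xlog_recover_commit_trans() */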
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index eb403b40e120..6afaaeb2950a 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -201,6 +201,38 @@ xfs_uuid_unmount(
/*
+ * Reference counting access wrappers to the perag structures.
+ */
+struct xfs_perag *
+xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno)
+{
+ struct xfs_perag *pag;
+ int ref = 0;
+
+ spin_lock(&mp->m_perag_lock);
+ pag = radix_tree_lookup(&mp->m_perag_tree, agno);
+ if (pag) {
+ ASSERT(atomic_read(&pag->pag_ref) >= 0);
+ /* catch leaks in the positive direction during testing */
+ ASSERT(atomic_read(&pag->pag_ref) < 1000);
+ ref = atomic_inc_return(&pag->pag_ref);
+ }
+ spin_unlock(&mp->m_perag_lock);
+ trace_xfs_perag_get(mp, agno, ref, _RET_IP_);
+ return pag;
+}
+
+void
+xfs_perag_put(struct xfs_perag *pag)
+{
+ int ref;
+
+ ASSERT(atomic_read(&pag->pag_ref) > 0);
+ ref = atomic_dec_return(&pag->pag_ref);
+ trace_xfs_perag_put(pag->pag_mount, pag->pag_agno, ref, _RET_IP_);
+}
+
+/*
* Free up the resources associated with a mount structure. Assume that
* the structure was initially zeroed, so we can tell which fields got
* initialized.
@@ -209,13 +241,16 @@ STATIC void
xfs_free_perag(
xfs_mount_t *mp)
{
- if (mp->m_perag) {
- int agno;
+ xfs_agnumber_t agno;
+ struct xfs_perag *pag;
- for (agno = 0; agno < mp->m_maxagi; agno++)
- if (mp->m_perag[agno].pagb_list)
- kmem_free(mp->m_perag[agno].pagb_list);
- kmem_free(mp->m_perag);
+ for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
+ spin_lock(&mp->m_perag_lock);
+ pag = radix_tree_delete(&mp->m_perag_tree, agno);
+ ASSERT(pag);
+ ASSERT(atomic_read(&pag->pag_ref) == 0);
+ spin_unlock(&mp->m_perag_lock);
+ kmem_free(pag);
}
}
@@ -389,22 +424,57 @@ xfs_initialize_perag_icache(
}
}
-xfs_agnumber_t
+int
xfs_initialize_perag(
xfs_mount_t *mp,
- xfs_agnumber_t agcount)
+ xfs_agnumber_t agcount,
+ xfs_agnumber_t *maxagi)
{
xfs_agnumber_t index, max_metadata;
+ xfs_agnumber_t first_initialised = 0;
xfs_perag_t *pag;
xfs_agino_t agino;
xfs_ino_t ino;
xfs_sb_t *sbp = &mp->m_sb;
xfs_ino_t max_inum = XFS_MAXINUMBER_32;
+ int error = -ENOMEM;
/* Check to see if the filesystem can overflow 32 bit inodes */
agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);
+ /*
+ * Walk the current per-ag tree so we don't try to initialise AGs
+ * that already exist (growfs case). Allocate and insert all the
+ * AGs we don't find ready for initialisation.
+ */
+ for (index = 0; index < agcount; index++) {
+ pag = xfs_perag_get(mp, index);
+ if (pag) {
+ xfs_perag_put(pag);
+ continue;
+ }
+ if (!first_initialised)
+ first_initialised = index;
+ pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
+ if (!pag)
+ goto out_unwind;
+ if (radix_tree_preload(GFP_NOFS))
+ goto out_unwind;
+ spin_lock(&mp->m_perag_lock);
+ if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
+ BUG();
+ spin_unlock(&mp->m_perag_lock);
+ radix_tree_preload_end();
+ error = -EEXIST;
+ goto out_unwind;
+ }
+ pag->pag_agno = index;
+ pag->pag_mount = mp;
+ spin_unlock(&mp->m_perag_lock);
+ radix_tree_preload_end();
+ }
+
/* Clear the mount flag if no inode can overflow 32 bits
* on this filesystem, or if specifically requested..
*/
@@ -438,21 +508,33 @@ xfs_initialize_perag(
}
/* This ag is preferred for inodes */
- pag = &mp->m_perag[index];
+ pag = xfs_perag_get(mp, index);
pag->pagi_inodeok = 1;
if (index < max_metadata)
pag->pagf_metadata = 1;
xfs_initialize_perag_icache(pag);
+ xfs_perag_put(pag);
}
} else {
/* Setup default behavior for smaller filesystems */
for (index = 0; index < agcount; index++) {
- pag = &mp->m_perag[index];
+ pag = xfs_perag_get(mp, index);
pag->pagi_inodeok = 1;
xfs_initialize_perag_icache(pag);
+ xfs_perag_put(pag);
}
}
- return index;
+ if (maxagi)
+ *maxagi = index;
+ return 0;
+
+out_unwind:
+ kmem_free(pag);
+ for (; index > first_initialised; index--) {
+ pag = radix_tree_delete(&mp->m_perag_tree, index);
+ kmem_free(pag);
+ }
+ return error;
}
void
@@ -583,7 +665,7 @@ xfs_readsb(xfs_mount_t *mp, int flags)
* access to the superblock.
*/
sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
- extra_flags = XFS_BUF_LOCK | XFS_BUF_MANAGE | XFS_BUF_MAPPED;
+ extra_flags = XBF_LOCK | XBF_FS_MANAGED | XBF_MAPPED;
bp = xfs_buf_read(mp->m_ddev_targp, XFS_SB_DADDR, BTOBB(sector_size),
extra_flags);
@@ -731,12 +813,13 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
error = xfs_ialloc_pagi_init(mp, NULL, index);
if (error)
return error;
- pag = &mp->m_perag[index];
+ pag = xfs_perag_get(mp, index);
ifree += pag->pagi_freecount;
ialloc += pag->pagi_count;
bfree += pag->pagf_freeblks;
bfreelst += pag->pagf_flcount;
btree += pag->pagf_btreeblks;
+ xfs_perag_put(pag);
}
/*
* Overwrite incore superblock counters with just-read data
@@ -1008,6 +1091,22 @@ xfs_mount_reset_sbqflags(
return xfs_trans_commit(tp, 0);
}
+__uint64_t
+xfs_default_resblks(xfs_mount_t *mp)
+{
+ __uint64_t resblks;
+
+ /*
+ * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
+ * This may drive us straight to ENOSPC on mount, but that implies
+ * we were already there on the last unmount. Warn if this occurs.
+ */
+ resblks = mp->m_sb.sb_dblocks;
+ do_div(resblks, 20);
+ resblks = min_t(__uint64_t, resblks, 1024);
+ return resblks;
+}
+
/*
* This function does the following on an initial mount of a file system:
* - reads the superblock from disk and init the mount struct
@@ -1152,13 +1251,13 @@ xfs_mountfs(
/*
* Allocate and initialize the per-ag data.
*/
- init_rwsem(&mp->m_peraglock);
- mp->m_perag = kmem_zalloc(sbp->sb_agcount * sizeof(xfs_perag_t),
- KM_MAYFAIL);
- if (!mp->m_perag)
+ spin_lock_init(&mp->m_perag_lock);
+ INIT_RADIX_TREE(&mp->m_perag_tree, GFP_NOFS);
+ error = xfs_initialize_perag(mp, sbp->sb_agcount, &mp->m_maxagi);
+ if (error) {
+ cmn_err(CE_WARN, "XFS: Failed per-ag init: %d", error);
goto out_remove_uuid;
-
- mp->m_maxagi = xfs_initialize_perag(mp, sbp->sb_agcount);
+ }
if (!sbp->sb_logblocks) {
cmn_err(CE_WARN, "XFS: no log defined");
@@ -1318,18 +1417,14 @@ xfs_mountfs(
* when at ENOSPC. This is needed for operations like create with
* attr, unwritten extent conversion at ENOSPC, etc. Data allocations
* are not allowed to use this reserved space.
- *
- * We default to 5% or 1024 fsbs of space reserved, whichever is smaller.
- * This may drive us straight to ENOSPC on mount, but that implies
- * we were already there on the last unmount. Warn if this occurs.
*/
- resblks = mp->m_sb.sb_dblocks;
- do_div(resblks, 20);
- resblks = min_t(__uint64_t, resblks, 1024);
- error = xfs_reserve_blocks(mp, &resblks, NULL);
- if (error)
- cmn_err(CE_WARN, "XFS: Unable to allocate reserve blocks. "
- "Continuing without a reserve pool.");
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ resblks = xfs_default_resblks(mp);
+ error = xfs_reserve_blocks(mp, &resblks, NULL);
+ if (error)
+ cmn_err(CE_WARN, "XFS: Unable to allocate reserve "
+ "blocks. Continuing without a reserve pool.");
+ }
return 0;
@@ -1372,8 +1467,19 @@ xfs_unmountfs(
* push out the iclog we will never get that unlocked. hence we
* need to force the log first.
*/
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
- xfs_reclaim_inodes(mp, XFS_IFLUSH_ASYNC);
+ xfs_log_force(mp, XFS_LOG_SYNC);
+
+ /*
+ * Do a delwri reclaim pass first so that as many dirty inodes are
+ * queued up for IO as possible. Then flush the buffers before making
+ * a synchronous reclaim pass to catch all the remaining inodes.
+ * This makes the reclaim process as quick as possible by avoiding
+ * synchronous writeout and blocking on inodes already in the delwri
+ * state as much as possible.
+ */
+ xfs_reclaim_inodes(mp, 0);
+ XFS_bflush(mp->m_ddev_targp);
+ xfs_reclaim_inodes(mp, SYNC_WAIT);
xfs_qm_unmount(mp);
@@ -1382,7 +1488,7 @@ xfs_unmountfs(
* that nothing is pinned. This is important because bflush()
* will skip pinned buffers.
*/
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
+ xfs_log_force(mp, XFS_LOG_SYNC);
xfs_binval(mp->m_ddev_targp);
if (mp->m_rtdev_targp) {
@@ -1548,15 +1654,14 @@ xfs_mod_sb(xfs_trans_t *tp, __int64_t fields)
xfs_sb_to_disk(XFS_BUF_TO_SBP(bp), &mp->m_sb, fields);
/* find modified range */
+ f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
+ ASSERT((1LL << f) & XFS_SB_MOD_BITS);
+ last = xfs_sb_info[f + 1].offset - 1;
f = (xfs_sb_field_t)xfs_lowbit64((__uint64_t)fields);
ASSERT((1LL << f) & XFS_SB_MOD_BITS);
first = xfs_sb_info[f].offset;
- f = (xfs_sb_field_t)xfs_highbit64((__uint64_t)fields);
- ASSERT((1LL << f) & XFS_SB_MOD_BITS);
- last = xfs_sb_info[f + 1].offset - 1;
-
xfs_trans_log_buf(tp, bp, first, last);
}
@@ -1887,7 +1992,7 @@ xfs_getsb(
ASSERT(mp->m_sb_bp != NULL);
bp = mp->m_sb_bp;
- if (flags & XFS_BUF_TRYLOCK) {
+ if (flags & XBF_TRYLOCK) {
if (!XFS_BUF_CPSEMA(bp)) {
return NULL;
}
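
The xfs_mount.c changes above move the per-AG structures from a flat m_perag array guarded by m_peraglock into a radix tree with per-structure reference counts, accessed only through xfs_perag_get()/xfs_perag_put(). The sketch below shows the lookup discipline this imposes on callers; it assumes the XFS-internal headers, and demo_ag_free_inodes is a hypothetical helper, not part of the patch.

/* Sketch: every xfs_perag_get() must be paired with an xfs_perag_put() */
static int demo_ag_free_inodes(struct xfs_mount *mp, xfs_agnumber_t agno,
			       __uint64_t *ifree)
{
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, agno);	/* takes a reference under m_perag_lock */
	if (!pag)
		return ENOENT;		/* positive errno, XFS-internal convention */
	*ifree = pag->pagi_freecount;
	xfs_perag_put(pag);		/* drop the reference; pag may now go away */
	return 0;
}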
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 1df7e4502967..70504fcf14cd 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -78,7 +78,8 @@ typedef int (*xfs_send_destroy_t)(struct xfs_inode *, dm_right_t);
typedef int (*xfs_send_namesp_t)(dm_eventtype_t, struct xfs_mount *,
struct xfs_inode *, dm_right_t,
struct xfs_inode *, dm_right_t,
- const char *, const char *, mode_t, int, int);
+ const unsigned char *, const unsigned char *,
+ mode_t, int, int);
typedef int (*xfs_send_mount_t)(struct xfs_mount *, dm_right_t,
char *, char *);
typedef void (*xfs_send_unmount_t)(struct xfs_mount *, struct xfs_inode *,
@@ -207,8 +208,8 @@ typedef struct xfs_mount {
uint m_ag_maxlevels; /* XFS_AG_MAXLEVELS */
uint m_bm_maxlevels[2]; /* XFS_BM_MAXLEVELS */
uint m_in_maxlevels; /* max inobt btree levels. */
- struct xfs_perag *m_perag; /* per-ag accounting info */
- struct rw_semaphore m_peraglock; /* lock for m_perag (pointer) */
+ struct radix_tree_root m_perag_tree; /* per-ag accounting info */
+ spinlock_t m_perag_lock; /* lock for m_perag_tree */
struct mutex m_growlock; /* growfs mutex */
int m_fixedfsid[2]; /* unchanged for life of FS */
uint m_dmevmask; /* DMI events for this FS */
@@ -224,6 +225,7 @@ typedef struct xfs_mount {
__uint64_t m_maxioffset; /* maximum inode offset */
__uint64_t m_resblks; /* total reserved blocks */
__uint64_t m_resblks_avail;/* available reserved blocks */
+ __uint64_t m_resblks_save; /* reserved blks @ remount,ro */
int m_dalign; /* stripe unit */
int m_swidth; /* stripe width */
int m_sinoalign; /* stripe unit inode alignment */
@@ -384,19 +386,10 @@ xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
}
/*
- * perag get/put wrappers for eventual ref counting
+ * perag get/put wrappers for ref counting
*/
-static inline xfs_perag_t *
-xfs_get_perag(struct xfs_mount *mp, xfs_ino_t ino)
-{
- return &mp->m_perag[XFS_INO_TO_AGNO(mp, ino)];
-}
-
-static inline void
-xfs_put_perag(struct xfs_mount *mp, xfs_perag_t *pag)
-{
- /* nothing to see here, move along */
-}
+struct xfs_perag *xfs_perag_get(struct xfs_mount *mp, xfs_agnumber_t agno);
+void xfs_perag_put(struct xfs_perag *pag);
/*
* Per-cpu superblock locking functions
@@ -428,6 +421,7 @@ typedef struct xfs_mod_sb {
} xfs_mod_sb_t;
extern int xfs_log_sbcount(xfs_mount_t *, uint);
+extern __uint64_t xfs_default_resblks(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp);
extern void xfs_unmountfs(xfs_mount_t *);
@@ -450,7 +444,8 @@ extern struct xfs_dmops xfs_dmcore_xfs;
#endif /* __KERNEL__ */
extern void xfs_mod_sb(struct xfs_trans *, __int64_t);
-extern xfs_agnumber_t xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t);
+extern int xfs_initialize_perag(struct xfs_mount *, xfs_agnumber_t,
+ xfs_agnumber_t *);
extern void xfs_sb_from_disk(struct xfs_sb *, struct xfs_dsb *);
extern void xfs_sb_to_disk(struct xfs_dsb *, struct xfs_sb *, __int64_t);
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index 4b0613d99faa..45ce15dc5b2b 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -398,7 +398,7 @@ exit:
* guaranteed that all the free functions for all the elements have finished
* executing and the reaper is not running.
*/
-void
+static void
xfs_mru_cache_flush(
xfs_mru_cache_t *mru)
{
diff --git a/fs/xfs/xfs_mru_cache.h b/fs/xfs/xfs_mru_cache.h
index 5d439f34b0c9..36dd3ec8b4eb 100644
--- a/fs/xfs/xfs_mru_cache.h
+++ b/fs/xfs/xfs_mru_cache.h
@@ -42,7 +42,6 @@ void xfs_mru_cache_uninit(void);
int xfs_mru_cache_create(struct xfs_mru_cache **mrup, unsigned int lifetime_ms,
unsigned int grp_count,
xfs_mru_cache_free_func_t free_func);
-void xfs_mru_cache_flush(xfs_mru_cache_t *mru);
void xfs_mru_cache_destroy(struct xfs_mru_cache *mru);
int xfs_mru_cache_insert(struct xfs_mru_cache *mru, unsigned long key,
void *value);
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 91bfd60f4c74..fdcab3f81dde 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -223,16 +223,9 @@ typedef struct xfs_qoff_logformat {
#define XFS_QMOPT_RES_INOS 0x0800000
/*
- * flags for dqflush and dqflush_all.
- */
-#define XFS_QMOPT_SYNC 0x1000000
-#define XFS_QMOPT_ASYNC 0x2000000
-#define XFS_QMOPT_DELWRI 0x4000000
-
-/*
* flags for dqalloc.
*/
-#define XFS_QMOPT_INHERIT 0x8000000
+#define XFS_QMOPT_INHERIT 0x1000000
/*
* flags to xfs_trans_mod_dquot.
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 9e15a1185362..6be05f756d59 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1517,6 +1517,8 @@ xfs_rtfree_range(
*/
error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
&postblock);
+ if (error)
+ return error;
/*
* If there are blocks not being freed at the front of the
* old extent, add summary data for them to be allocated.
diff --git a/fs/xfs/xfs_rw.c b/fs/xfs/xfs_rw.c
index 5aa07caea5f1..e336742a58a4 100644
--- a/fs/xfs/xfs_rw.c
+++ b/fs/xfs/xfs_rw.c
@@ -47,48 +47,6 @@
#include "xfs_trace.h"
/*
- * This is a subroutine for xfs_write() and other writers (xfs_ioctl)
- * which clears the setuid and setgid bits when a file is written.
- */
-int
-xfs_write_clear_setuid(
- xfs_inode_t *ip)
-{
- xfs_mount_t *mp;
- xfs_trans_t *tp;
- int error;
-
- mp = ip->i_mount;
- tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
- if ((error = xfs_trans_reserve(tp, 0,
- XFS_WRITEID_LOG_RES(mp),
- 0, 0, 0))) {
- xfs_trans_cancel(tp, 0);
- return error;
- }
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- xfs_trans_ihold(tp, ip);
- ip->i_d.di_mode &= ~S_ISUID;
-
- /*
- * Note that we don't have to worry about mandatory
- * file locking being disabled here because we only
- * clear the S_ISGID bit if the Group execute bit is
- * on, but if it was on then mandatory locking wouldn't
- * have been enabled.
- */
- if (ip->i_d.di_mode & S_IXGRP) {
- ip->i_d.di_mode &= ~S_ISGID;
- }
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return 0;
-}
-
-/*
* Force a shutdown of the filesystem instantly while keeping
* the filesystem consistent. We don't do an unmount here; just shutdown
* the shop, make sure that absolutely nothing persistent happens to
@@ -153,88 +111,6 @@ xfs_do_force_shutdown(
}
}
-
-/*
- * Called when we want to stop a buffer from getting written or read.
- * We attach the EIO error, muck with its flags, and call biodone
- * so that the proper iodone callbacks get called.
- */
-int
-xfs_bioerror(
- xfs_buf_t *bp)
-{
-
-#ifdef XFSERRORDEBUG
- ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
-#endif
-
- /*
- * No need to wait until the buffer is unpinned.
- * We aren't flushing it.
- */
- XFS_BUF_ERROR(bp, EIO);
- /*
- * We're calling biodone, so delete B_DONE flag. Either way
- * we have to call the iodone callback, and calling biodone
- * probably is the best way since it takes care of
- * GRIO as well.
- */
- XFS_BUF_UNREAD(bp);
- XFS_BUF_UNDELAYWRITE(bp);
- XFS_BUF_UNDONE(bp);
- XFS_BUF_STALE(bp);
-
- XFS_BUF_CLR_BDSTRAT_FUNC(bp);
- xfs_biodone(bp);
-
- return (EIO);
-}
-
-/*
- * Same as xfs_bioerror, except that we are releasing the buffer
- * here ourselves, and avoiding the biodone call.
- * This is meant for userdata errors; metadata bufs come with
- * iodone functions attached, so that we can track down errors.
- */
-int
-xfs_bioerror_relse(
- xfs_buf_t *bp)
-{
- int64_t fl;
-
- ASSERT(XFS_BUF_IODONE_FUNC(bp) != xfs_buf_iodone_callbacks);
- ASSERT(XFS_BUF_IODONE_FUNC(bp) != xlog_iodone);
-
- fl = XFS_BUF_BFLAGS(bp);
- /*
- * No need to wait until the buffer is unpinned.
- * We aren't flushing it.
- *
- * chunkhold expects B_DONE to be set, whether
- * we actually finish the I/O or not. We don't want to
- * change that interface.
- */
- XFS_BUF_UNREAD(bp);
- XFS_BUF_UNDELAYWRITE(bp);
- XFS_BUF_DONE(bp);
- XFS_BUF_STALE(bp);
- XFS_BUF_CLR_IODONE_FUNC(bp);
- XFS_BUF_CLR_BDSTRAT_FUNC(bp);
- if (!(fl & XFS_B_ASYNC)) {
- /*
- * Mark b_error and B_ERROR _both_.
- * Lot's of chunkcache code assumes that.
- * There's no reason to mark error for
- * ASYNC buffers.
- */
- XFS_BUF_ERROR(bp, EIO);
- XFS_BUF_FINISH_IOWAIT(bp);
- } else {
- xfs_buf_relse(bp);
- }
- return (EIO);
-}
-
/*
* Prints out an ALERT message about I/O error.
*/
@@ -306,37 +182,6 @@ xfs_read_buf(
}
/*
- * Wrapper around bwrite() so that we can trap
- * write errors, and act accordingly.
- */
-int
-xfs_bwrite(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- int error;
-
- /*
- * XXXsup how does this work for quotas.
- */
- XFS_BUF_SET_BDSTRAT_FUNC(bp, xfs_bdstrat_cb);
- bp->b_mount = mp;
- XFS_BUF_WRITE(bp);
-
- if ((error = XFS_bwrite(bp))) {
- ASSERT(mp);
- /*
- * Cannot put a buftrace here since if the buffer is not
- * B_HOLD then we will brelse() the buffer before returning
- * from bwrite and we could be tracing a buffer that has
- * been reused.
- */
- xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
- }
- return (error);
-}
-
-/*
* helper function to extract extent size hint from inode
*/
xfs_extlen_t
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index 571f2174435c..11c41ec6ed75 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -39,10 +39,6 @@ xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
/*
* Prototypes for functions in xfs_rw.c.
*/
-extern int xfs_write_clear_setuid(struct xfs_inode *ip);
-extern int xfs_bwrite(struct xfs_mount *mp, struct xfs_buf *bp);
-extern int xfs_bioerror(struct xfs_buf *bp);
-extern int xfs_bioerror_relse(struct xfs_buf *bp);
extern int xfs_read_buf(struct xfs_mount *mp, xfs_buftarg_t *btp,
xfs_daddr_t blkno, int len, uint flags,
struct xfs_buf **bpp);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 237badcbac3b..be942d4e3324 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -981,9 +981,8 @@ shut_us_down:
*/
if (sync) {
if (!error) {
- error = _xfs_log_force(mp, commit_lsn,
- XFS_LOG_FORCE | XFS_LOG_SYNC,
- log_flushed);
+ error = _xfs_log_force_lsn(mp, commit_lsn,
+ XFS_LOG_SYNC, log_flushed);
}
XFS_STATS_INC(xs_trans_sync);
} else {
@@ -1121,7 +1120,7 @@ xfs_trans_fill_vecs(
tp->t_header.th_num_items = nitems;
log_vector->i_addr = (xfs_caddr_t)&tp->t_header;
log_vector->i_len = sizeof(xfs_trans_header_t);
- XLOG_VEC_SET_TYPE(log_vector, XLOG_REG_TYPE_TRANSHDR);
+ log_vector->i_type = XLOG_REG_TYPE_TRANSHDR;
}
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index ca64f33c63a3..c93e3a102857 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -861,8 +861,7 @@ typedef struct xfs_item_ops {
#define XFS_ITEM_SUCCESS 0
#define XFS_ITEM_PINNED 1
#define XFS_ITEM_LOCKED 2
-#define XFS_ITEM_FLUSHING 3
-#define XFS_ITEM_PUSHBUF 4
+#define XFS_ITEM_PUSHBUF 3
/*
* This structure is used to maintain a list of block ranges that have been
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 2ffc570679be..e799824f7245 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -237,14 +237,15 @@ out:
}
/*
- * Function that does the work of pushing on the AIL
+ * xfsaild_push does the work of pushing on the AIL. Returning a timeout of
+ * zero indicates that the caller should sleep until woken.
*/
long
xfsaild_push(
struct xfs_ail *ailp,
xfs_lsn_t *last_lsn)
{
- long tout = 1000; /* milliseconds */
+ long tout = 0;
xfs_lsn_t last_pushed_lsn = *last_lsn;
xfs_lsn_t target = ailp->xa_target;
xfs_lsn_t lsn;
@@ -252,6 +253,7 @@ xfsaild_push(
int flush_log, count, stuck;
xfs_mount_t *mp = ailp->xa_mount;
struct xfs_ail_cursor *cur = &ailp->xa_cursors;
+ int push_xfsbufd = 0;
spin_lock(&ailp->xa_lock);
xfs_trans_ail_cursor_init(ailp, cur);
@@ -262,7 +264,7 @@ xfsaild_push(
*/
xfs_trans_ail_cursor_done(ailp, cur);
spin_unlock(&ailp->xa_lock);
- last_pushed_lsn = 0;
+ *last_lsn = 0;
return tout;
}
@@ -279,7 +281,6 @@ xfsaild_push(
* prevents us from spinning when we can't do anything or there is
* lots of contention on the AIL lists.
*/
- tout = 10;
lsn = lip->li_lsn;
flush_log = stuck = count = 0;
while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
@@ -308,6 +309,7 @@ xfsaild_push(
XFS_STATS_INC(xs_push_ail_pushbuf);
IOP_PUSHBUF(lip);
last_pushed_lsn = lsn;
+ push_xfsbufd = 1;
break;
case XFS_ITEM_PINNED:
@@ -322,12 +324,6 @@ xfsaild_push(
stuck++;
break;
- case XFS_ITEM_FLUSHING:
- XFS_STATS_INC(xs_push_ail_flushing);
- last_pushed_lsn = lsn;
- stuck++;
- break;
-
default:
ASSERT(0);
break;
@@ -371,19 +367,24 @@ xfsaild_push(
* move forward in the AIL.
*/
XFS_STATS_INC(xs_push_ail_flush);
- xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
+ xfs_log_force(mp, 0);
+ }
+
+ if (push_xfsbufd) {
+ /* we've got delayed write buffers to flush */
+ wake_up_process(mp->m_ddev_targp->bt_task);
}
if (!count) {
/* We're past our target or empty, so idle */
- tout = 1000;
+ last_pushed_lsn = 0;
} else if (XFS_LSN_CMP(lsn, target) >= 0) {
/*
* We reached the target so wait a bit longer for I/O to
* complete and remove pushed items from the AIL before we
* start the next scan from the start of the AIL.
*/
- tout += 20;
+ tout = 50;
last_pushed_lsn = 0;
} else if ((stuck * 100) / count > 90) {
/*
@@ -395,11 +396,14 @@ xfsaild_push(
* Backoff a bit more to allow some I/O to complete before
* continuing from where we were.
*/
- tout += 10;
+ tout = 20;
+ } else {
+ /* more to do, but wait a short while before continuing */
+ tout = 10;
}
*last_lsn = last_pushed_lsn;
return tout;
-} /* xfsaild_push */
+}
/*
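
In the xfs_trans_ail.c hunks above, xfsaild_push() stops hard-coding a 1000 ms timeout and instead returns 0 when the AIL is empty or past the target (meaning: sleep until explicitly woken) and a short 10/20/50 ms backoff otherwise. The caller lives in the Linux-specific xfsaild() thread, which is not part of this section; the following is only a rough sketch of how such a caller might interpret the return value, with demo_aild as a hypothetical name.

/* Sketch: a push thread honouring the new timeout convention */
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static int demo_aild(void *data)
{
	struct xfs_ail	*ailp = data;
	xfs_lsn_t	last_pushed_lsn = 0;
	long		tout;

	while (!kthread_should_stop()) {
		tout = xfsaild_push(ailp, &last_pushed_lsn);

		if (tout)
			schedule_timeout_interruptible(msecs_to_jiffies(tout));
		else
			/* 0: nothing to push, sleep until the AIL wakes us */
			schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
	}
	return 0;
}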
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index 49130628d5ef..5ffd544434eb 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -75,13 +75,14 @@ xfs_trans_get_buf(xfs_trans_t *tp,
xfs_buf_log_item_t *bip;
if (flags == 0)
- flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+ flags = XBF_LOCK | XBF_MAPPED;
/*
* Default to a normal get_buf() call if the tp is NULL.
*/
if (tp == NULL)
- return xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+ return xfs_buf_get(target_dev, blkno, len,
+ flags | XBF_DONT_BLOCK);
/*
* If we find the buffer in the cache with this transaction
@@ -117,14 +118,14 @@ xfs_trans_get_buf(xfs_trans_t *tp,
}
/*
- * We always specify the BUF_BUSY flag within a transaction so
- * that get_buf does not try to push out a delayed write buffer
+ * We always specify the XBF_DONT_BLOCK flag within a transaction
+ * so that get_buf does not try to push out a delayed write buffer
* which might cause another transaction to take place (if the
* buffer was delayed alloc). Such recursive transactions can
* easily deadlock with our current transaction as well as cause
* us to run out of stack space.
*/
- bp = xfs_buf_get(target_dev, blkno, len, flags | BUF_BUSY);
+ bp = xfs_buf_get(target_dev, blkno, len, flags | XBF_DONT_BLOCK);
if (bp == NULL) {
return NULL;
}
@@ -290,15 +291,15 @@ xfs_trans_read_buf(
int error;
if (flags == 0)
- flags = XFS_BUF_LOCK | XFS_BUF_MAPPED;
+ flags = XBF_LOCK | XBF_MAPPED;
/*
* Default to a normal get_buf() call if the tp is NULL.
*/
if (tp == NULL) {
- bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+ bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
if (!bp)
- return (flags & XFS_BUF_TRYLOCK) ?
+ return (flags & XBF_TRYLOCK) ?
EAGAIN : XFS_ERROR(ENOMEM);
if (XFS_BUF_GETERROR(bp) != 0) {
@@ -385,14 +386,14 @@ xfs_trans_read_buf(
}
/*
- * We always specify the BUF_BUSY flag within a transaction so
- * that get_buf does not try to push out a delayed write buffer
+ * We always specify the XBF_DONT_BLOCK flag within a transaction
+ * so that get_buf does not try to push out a delayed write buffer
* which might cause another transaction to take place (if the
* buffer was delayed alloc). Such recursive transactions can
* easily deadlock with our current transaction as well as cause
* us to run out of stack space.
*/
- bp = xfs_buf_read(target, blkno, len, flags | BUF_BUSY);
+ bp = xfs_buf_read(target, blkno, len, flags | XBF_DONT_BLOCK);
if (bp == NULL) {
*bpp = NULL;
return 0;
@@ -472,8 +473,8 @@ shutdown_abort:
if (XFS_BUF_ISSTALE(bp) && XFS_BUF_ISDELAYWRITE(bp))
cmn_err(CE_NOTE, "about to pop assert, bp == 0x%p", bp);
#endif
- ASSERT((XFS_BUF_BFLAGS(bp) & (XFS_B_STALE|XFS_B_DELWRI)) !=
- (XFS_B_STALE|XFS_B_DELWRI));
+ ASSERT((XFS_BUF_BFLAGS(bp) & (XBF_STALE|XBF_DELWRI)) !=
+ (XBF_STALE|XBF_DELWRI));
trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_types.h b/fs/xfs/xfs_types.h
index d725428c9df6..b09904555d07 100644
--- a/fs/xfs/xfs_types.h
+++ b/fs/xfs/xfs_types.h
@@ -151,8 +151,8 @@ typedef enum {
} xfs_btnum_t;
struct xfs_name {
- const char *name;
- int len;
+ const unsigned char *name;
+ int len;
};
#endif /* __XFS_TYPES_H__ */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 6f268756bf36..ddd2c5d1b854 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -256,7 +256,7 @@ xfs_setattr(
iattr->ia_size > ip->i_d.di_size) {
code = xfs_flush_pages(ip,
ip->i_d.di_size, iattr->ia_size,
- XFS_B_ASYNC, FI_NONE);
+ XBF_ASYNC, FI_NONE);
}
/* wait for all I/O to complete */
@@ -597,7 +597,7 @@ xfs_fsync(
{
xfs_trans_t *tp;
int error = 0;
- int log_flushed = 0, changed = 1;
+ int log_flushed = 0;
xfs_itrace_entry(ip);
@@ -627,19 +627,16 @@ xfs_fsync(
* disk yet, the inode will be still be pinned. If it is,
* force the log.
*/
-
xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
if (xfs_ipincount(ip)) {
- error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
- XFS_LOG_FORCE | XFS_LOG_SYNC,
- &log_flushed);
- } else {
- /*
- * If the inode is not pinned and nothing has changed
- * we don't need to flush the cache.
- */
- changed = 0;
+ if (ip->i_itemp->ili_last_lsn) {
+ error = _xfs_log_force_lsn(ip->i_mount,
+ ip->i_itemp->ili_last_lsn,
+ XFS_LOG_SYNC, &log_flushed);
+ } else {
+ error = _xfs_log_force(ip->i_mount,
+ XFS_LOG_SYNC, &log_flushed);
+ }
}
} else {
/*
@@ -674,7 +671,7 @@ xfs_fsync(
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
- if ((ip->i_mount->m_flags & XFS_MOUNT_BARRIER) && changed) {
+ if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) {
/*
* If the log write didn't issue an ordered tag we need
* to flush the disk cache for the data device now.
@@ -1096,7 +1093,7 @@ xfs_release(
*/
truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
if (truncated && VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
- xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
+ xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
}
if (ip->i_d.di_nlink != 0) {
@@ -2199,7 +2196,8 @@ xfs_symlink(
if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
- link_name->name, target_path, 0, 0, 0);
+ link_name->name,
+ (unsigned char *)target_path, 0, 0, 0);
if (error)
return error;
}
@@ -2395,7 +2393,8 @@ std_return:
dp, DM_RIGHT_NULL,
error ? NULL : ip,
DM_RIGHT_NULL, link_name->name,
- target_path, 0, error, 0);
+ (unsigned char *)target_path,
+ 0, error, 0);
}
if (!error)
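
The xfs_fsync() hunk above no longer forces the entire log whenever the inode is pinned: if the inode log item records its last-commit LSN, the log is forced only up to that LSN, with a full log force as the fallback. Condensed into a hedged sketch below; demo_force_inode_log is a hypothetical helper, the real logic sits inline in xfs_fsync().

/* Sketch: force the log just far enough to cover a pinned inode */
static int demo_force_inode_log(struct xfs_inode *ip, int *log_flushed)
{
	if (!xfs_ipincount(ip))
		return 0;		/* nothing of ours is still in the log */

	if (ip->i_itemp->ili_last_lsn)
		/* force only up to the inode's last commit record */
		return _xfs_log_force_lsn(ip->i_mount,
					  ip->i_itemp->ili_last_lsn,
					  XFS_LOG_SYNC, log_flushed);

	/* no LSN recorded: force the whole log */
	return _xfs_log_force(ip->i_mount, XFS_LOG_SYNC, log_flushed);
}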
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 167a467403a5..774f40729ca1 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -43,11 +43,11 @@ int xfs_change_file_space(struct xfs_inode *ip, int cmd,
int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
struct xfs_inode *src_ip, struct xfs_inode *target_dp,
struct xfs_name *target_name, struct xfs_inode *target_ip);
-int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value,
- int *valuelenp, int flags);
-int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value,
- int valuelen, int flags);
-int xfs_attr_remove(struct xfs_inode *dp, const char *name, int flags);
+int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
+ unsigned char *value, int *valuelenp, int flags);
+int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
+ unsigned char *value, int valuelen, int flags);
+int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
int flags, struct attrlist_cursor_kern *cursor);
ssize_t xfs_read(struct xfs_inode *ip, struct kiocb *iocb,