Diffstat (limited to 'fs')
40 files changed, 396 insertions, 939 deletions
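A recurring pattern in this diff is the shrinking of mrlock_t (the writer-tracking field becomes DEBUG-only, mrunlock() is split into mrunlock_excl()/mrunlock_shared()) and the replacement of ismrlocked()-based assertions with the new xfs_isilocked() helper. The sketch below is a minimal userspace model of that pattern, not kernel code: it uses a pthread rwlock in place of the rw_semaphore, and is_locked_excl() is a hypothetical stand-in for xfs_isilocked(); only the shape of the DEBUG-only writer flag matches the patch.

/*
 * Userspace model of the mrlock_t / xfs_isilocked() pattern: the lock
 * itself cannot be queried for "held for write?", so a DEBUG-only
 * mr_writer flag is set and cleared around the write-lock operations
 * and checked by the assertion helper.
 */
#include <pthread.h>
#include <assert.h>

#define DEBUG 1

typedef struct {
	pthread_rwlock_t	mr_lock;
#ifdef DEBUG
	int			mr_writer;	/* nonzero only while write-held */
#endif
} mrlock_t;

static void mrupdate(mrlock_t *mrp)
{
	pthread_rwlock_wrlock(&mrp->mr_lock);
#ifdef DEBUG
	mrp->mr_writer = 1;
#endif
}

static void mrunlock_excl(mrlock_t *mrp)
{
#ifdef DEBUG
	mrp->mr_writer = 0;
#endif
	pthread_rwlock_unlock(&mrp->mr_lock);
}

static void mraccess(mrlock_t *mrp)
{
	pthread_rwlock_rdlock(&mrp->mr_lock);
}

static void mrunlock_shared(mrlock_t *mrp)
{
	pthread_rwlock_unlock(&mrp->mr_lock);
}

/*
 * Analogue of xfs_isilocked(): can positively confirm an exclusive
 * hold, but reader state is invisible, so shared queries always pass.
 */
static int is_locked_excl(mrlock_t *mrp)
{
#ifdef DEBUG
	return mrp->mr_writer;
#else
	return 1;
#endif
}

int main(void)
{
	mrlock_t m;

	pthread_rwlock_init(&m.mr_lock, NULL);
#ifdef DEBUG
	m.mr_writer = 0;
#endif

	mrupdate(&m);
	assert(is_locked_excl(&m));	/* like ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)) */
	mrunlock_excl(&m);

	mraccess(&m);			/* shared hold is invisible to the helper */
	mrunlock_shared(&m);

	pthread_rwlock_destroy(&m.mr_lock);
	return 0;
}

Because shared holders are never recorded, the helper can only confirm exclusive ownership; asserting its negation would misfire for readers, which is why the patch carries the "don't test !xfs_isilocked" warning over from the old ismrlocked() comment.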
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig index 524021ff5436..3f53dd101f99 100644 --- a/fs/xfs/Kconfig +++ b/fs/xfs/Kconfig @@ -64,3 +64,16 @@ config XFS_RT See the xfs man page in section 5 for additional information. If unsure, say N. + +config XFS_DEBUG + bool "XFS Debugging support (EXPERIMENTAL)" + depends on XFS_FS && EXPERIMENTAL + help + Say Y here to get an XFS build with many debugging features, + including ASSERT checks, function wrappers around macros, + and extra sanity-checking functions in various code paths. + + Note that the resulting code will be HUGE and SLOW, and probably + not useful unless you are debugging a particular problem. + + Say N unless you are an XFS developer, or you play one on TV. diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/linux-2.6/mrlock.h index c110bb002665..ff6a19873e5c 100644 --- a/fs/xfs/linux-2.6/mrlock.h +++ b/fs/xfs/linux-2.6/mrlock.h @@ -20,29 +20,24 @@ #include <linux/rwsem.h> -enum { MR_NONE, MR_ACCESS, MR_UPDATE }; - typedef struct { struct rw_semaphore mr_lock; +#ifdef DEBUG int mr_writer; +#endif } mrlock_t; +#ifdef DEBUG #define mrinit(mrp, name) \ do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0) +#else +#define mrinit(mrp, name) \ + do { init_rwsem(&(mrp)->mr_lock); } while (0) +#endif + #define mrlock_init(mrp, t,n,s) mrinit(mrp, n) #define mrfree(mrp) do { } while (0) -static inline void mraccess(mrlock_t *mrp) -{ - down_read(&mrp->mr_lock); -} - -static inline void mrupdate(mrlock_t *mrp) -{ - down_write(&mrp->mr_lock); - mrp->mr_writer = 1; -} - static inline void mraccess_nested(mrlock_t *mrp, int subclass) { down_read_nested(&mrp->mr_lock, subclass); @@ -51,10 +46,11 @@ static inline void mraccess_nested(mrlock_t *mrp, int subclass) static inline void mrupdate_nested(mrlock_t *mrp, int subclass) { down_write_nested(&mrp->mr_lock, subclass); +#ifdef DEBUG mrp->mr_writer = 1; +#endif } - static inline int mrtryaccess(mrlock_t *mrp) { return down_read_trylock(&mrp->mr_lock); @@ -64,39 +60,31 @@ static inline int mrtryupdate(mrlock_t *mrp) { if (!down_write_trylock(&mrp->mr_lock)) return 0; +#ifdef DEBUG mrp->mr_writer = 1; +#endif return 1; } -static inline void mrunlock(mrlock_t *mrp) +static inline void mrunlock_excl(mrlock_t *mrp) { - if (mrp->mr_writer) { - mrp->mr_writer = 0; - up_write(&mrp->mr_lock); - } else { - up_read(&mrp->mr_lock); - } +#ifdef DEBUG + mrp->mr_writer = 0; +#endif + up_write(&mrp->mr_lock); } -static inline void mrdemote(mrlock_t *mrp) +static inline void mrunlock_shared(mrlock_t *mrp) { - mrp->mr_writer = 0; - downgrade_write(&mrp->mr_lock); + up_read(&mrp->mr_lock); } -#ifdef DEBUG -/* - * Debug-only routine, without some platform-specific asm code, we can - * now only answer requests regarding whether we hold the lock for write - * (reader state is outside our visibility, we only track writer state). - * Note: means !ismrlocked would give false positives, so don't do that. 
- */ -static inline int ismrlocked(mrlock_t *mrp, int type) +static inline void mrdemote(mrlock_t *mrp) { - if (mrp && type == MR_UPDATE) - return mrp->mr_writer; - return 1; -} +#ifdef DEBUG + mrp->mr_writer = 0; #endif + downgrade_write(&mrp->mr_lock); +} #endif /* __XFS_SUPPORT_MRLOCK_H__ */ diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c index 52f6846101d5..5105015a75ad 100644 --- a/fs/xfs/linux-2.6/xfs_buf.c +++ b/fs/xfs/linux-2.6/xfs_buf.c @@ -886,7 +886,7 @@ int xfs_buf_lock_value( xfs_buf_t *bp) { - return atomic_read(&bp->b_sema.count); + return bp->b_sema.count; } #endif diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index 265f0168ab76..c672b3238b14 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c @@ -133,7 +133,7 @@ xfs_nfs_get_inode( if (!ip) return ERR_PTR(-EIO); - if (!ip->i_d.di_mode || ip->i_d.di_gen != generation) { + if (ip->i_d.di_gen != generation) { xfs_iput_new(ip, XFS_ILOCK_SHARED); return ERR_PTR(-ENOENT); } diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c index 05905246434d..65e78c13d4ae 100644 --- a/fs/xfs/linux-2.6/xfs_file.c +++ b/fs/xfs/linux-2.6/xfs_file.c @@ -43,9 +43,6 @@ #include <linux/smp_lock.h> static struct vm_operations_struct xfs_file_vm_ops; -#ifdef CONFIG_XFS_DMAPI -static struct vm_operations_struct xfs_dmapi_file_vm_ops; -#endif STATIC_INLINE ssize_t __xfs_file_read( @@ -202,22 +199,6 @@ xfs_file_fsync( (xfs_off_t)0, (xfs_off_t)-1); } -#ifdef CONFIG_XFS_DMAPI -STATIC int -xfs_vm_fault( - struct vm_area_struct *vma, - struct vm_fault *vmf) -{ - struct inode *inode = vma->vm_file->f_path.dentry->d_inode; - bhv_vnode_t *vp = vn_from_inode(inode); - - ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI); - if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), vma, 0)) - return VM_FAULT_SIGBUS; - return filemap_fault(vma, vmf); -} -#endif /* CONFIG_XFS_DMAPI */ - /* * Unfortunately we can't just use the clean and simple readdir implementation * below, because nfs might call back into ->lookup from the filldir callback @@ -386,11 +367,6 @@ xfs_file_mmap( vma->vm_ops = &xfs_file_vm_ops; vma->vm_flags |= VM_CAN_NONLINEAR; -#ifdef CONFIG_XFS_DMAPI - if (XFS_M(filp->f_path.dentry->d_inode->i_sb)->m_flags & XFS_MOUNT_DMAPI) - vma->vm_ops = &xfs_dmapi_file_vm_ops; -#endif /* CONFIG_XFS_DMAPI */ - file_accessed(filp); return 0; } @@ -437,47 +413,6 @@ xfs_file_ioctl_invis( return error; } -#ifdef CONFIG_XFS_DMAPI -#ifdef HAVE_VMOP_MPROTECT -STATIC int -xfs_vm_mprotect( - struct vm_area_struct *vma, - unsigned int newflags) -{ - struct inode *inode = vma->vm_file->f_path.dentry->d_inode; - struct xfs_mount *mp = XFS_M(inode->i_sb); - int error = 0; - - if (mp->m_flags & XFS_MOUNT_DMAPI) { - if ((vma->vm_flags & VM_MAYSHARE) && - (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE)) - error = XFS_SEND_MMAP(mp, vma, VM_WRITE); - } - return error; -} -#endif /* HAVE_VMOP_MPROTECT */ -#endif /* CONFIG_XFS_DMAPI */ - -#ifdef HAVE_FOP_OPEN_EXEC -/* If the user is attempting to execute a file that is offline then - * we have to trigger a DMAPI READ event before the file is marked as busy - * otherwise the invisible I/O will not be able to write to the file to bring - * it back online. 
- */ -STATIC int -xfs_file_open_exec( - struct inode *inode) -{ - struct xfs_mount *mp = XFS_M(inode->i_sb); - struct xfs_inode *ip = XFS_I(inode); - - if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI) && - DM_EVENT_ENABLED(ip, DM_EVENT_READ)) - return -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL); - return 0; -} -#endif /* HAVE_FOP_OPEN_EXEC */ - /* * mmap()d file has taken write protection fault and is being made * writable. We can set the page state up correctly for a writable @@ -546,13 +481,3 @@ static struct vm_operations_struct xfs_file_vm_ops = { .fault = filemap_fault, .page_mkwrite = xfs_vm_page_mkwrite, }; - -#ifdef CONFIG_XFS_DMAPI -static struct vm_operations_struct xfs_dmapi_file_vm_ops = { - .fault = xfs_vm_fault, - .page_mkwrite = xfs_vm_page_mkwrite, -#ifdef HAVE_VMOP_MPROTECT - .mprotect = xfs_vm_mprotect, -#endif -}; -#endif /* CONFIG_XFS_DMAPI */ diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index 4ddb86b73c6b..a42ba9d71156 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c @@ -238,7 +238,7 @@ xfs_vget_fsop_handlereq( return error; if (ip == NULL) return XFS_ERROR(EIO); - if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) { + if (ip->i_d.di_gen != igen) { xfs_iput_new(ip, XFS_ILOCK_SHARED); return XFS_ERROR(ENOENT); } @@ -505,14 +505,14 @@ xfs_attrmulti_attr_get( { char *kbuf; int error = EFAULT; - + if (*len > XATTR_SIZE_MAX) return EINVAL; kbuf = kmalloc(*len, GFP_KERNEL); if (!kbuf) return ENOMEM; - error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags, NULL); + error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags); if (error) goto out_kfree; @@ -546,7 +546,7 @@ xfs_attrmulti_attr_set( if (copy_from_user(kbuf, ubuf, len)) goto out_kfree; - + error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags); out_kfree: diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c index a1237dad6430..2bf287ef5489 100644 --- a/fs/xfs/linux-2.6/xfs_iops.c +++ b/fs/xfs/linux-2.6/xfs_iops.c @@ -511,7 +511,8 @@ xfs_vn_rename( xfs_dentry_to_name(&nname, ndentry); error = xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode), - XFS_I(ndir), &nname); + XFS_I(ndir), &nname, new_inode ? + XFS_I(new_inode) : NULL); if (likely(!error)) { if (new_inode) xfs_validate_fields(new_inode); diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h index e5143323e71f..1bc9f600365f 100644 --- a/fs/xfs/linux-2.6/xfs_linux.h +++ b/fs/xfs/linux-2.6/xfs_linux.h @@ -99,7 +99,6 @@ /* * Feature macros (disable/enable) */ -#define HAVE_SPLICE /* a splice(2) exists in 2.6, but not in 2.4 */ #ifdef CONFIG_SMP #define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */ #else diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c index 1ebd8004469c..5e3b57516ec7 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.c +++ b/fs/xfs/linux-2.6/xfs_lrw.c @@ -394,7 +394,7 @@ xfs_zero_last_block( int error = 0; xfs_bmbt_irec_t imap; - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); zero_offset = XFS_B_FSB_OFFSET(mp, isize); if (zero_offset == 0) { @@ -425,14 +425,14 @@ xfs_zero_last_block( * out sync. We need to drop the ilock while we do this so we * don't deadlock when the buffer cache calls back to us. 
*/ - xfs_iunlock(ip, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD); + xfs_iunlock(ip, XFS_ILOCK_EXCL); zero_len = mp->m_sb.sb_blocksize - zero_offset; if (isize + zero_len > offset) zero_len = offset - isize; error = xfs_iozero(ip, isize, zero_len); - xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + xfs_ilock(ip, XFS_ILOCK_EXCL); ASSERT(error >= 0); return error; } @@ -465,8 +465,7 @@ xfs_zero_eof( int error = 0; xfs_bmbt_irec_t imap; - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); - ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); ASSERT(offset > isize); /* @@ -475,8 +474,7 @@ xfs_zero_eof( */ error = xfs_zero_last_block(ip, offset, isize); if (error) { - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); - ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); return error; } @@ -507,8 +505,7 @@ xfs_zero_eof( error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb, 0, NULL, 0, &imap, &nimaps, NULL, NULL); if (error) { - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); - ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); return error; } ASSERT(nimaps > 0); @@ -532,7 +529,7 @@ xfs_zero_eof( * Drop the inode lock while we're doing the I/O. * We'll still have the iolock to protect us. */ - xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + xfs_iunlock(ip, XFS_ILOCK_EXCL); zero_off = XFS_FSB_TO_B(mp, start_zero_fsb); zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount); @@ -548,13 +545,13 @@ xfs_zero_eof( start_zero_fsb = imap.br_startoff + imap.br_blockcount; ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); - xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + xfs_ilock(ip, XFS_ILOCK_EXCL); } return 0; out_lock: - xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD); + xfs_ilock(ip, XFS_ILOCK_EXCL); ASSERT(error >= 0); return error; } diff --git a/fs/xfs/linux-2.6/xfs_lrw.h b/fs/xfs/linux-2.6/xfs_lrw.h index e1d498b4ba7a..e6be37dbd0e9 100644 --- a/fs/xfs/linux-2.6/xfs_lrw.h +++ b/fs/xfs/linux-2.6/xfs_lrw.h @@ -50,7 +50,6 @@ struct xfs_iomap; #define XFS_INVAL_CACHED 18 #define XFS_DIORD_ENTER 19 #define XFS_DIOWR_ENTER 20 -#define XFS_SENDFILE_ENTER 21 #define XFS_WRITEPAGE_ENTER 22 #define XFS_RELEASEPAGE_ENTER 23 #define XFS_INVALIDPAGE_ENTER 24 diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 865eb708aa95..742b2c7852c1 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c @@ -1181,7 +1181,7 @@ xfs_fs_statfs( statp->f_fsid.val[0] = (u32)id; statp->f_fsid.val[1] = (u32)(id >> 32); - xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT); + xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); spin_lock(&mp->m_sb_lock); statp->f_bsize = sbp->sb_blocksize; diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h index 8b4d63ce8694..9d73cb5c0fc7 100644 --- a/fs/xfs/linux-2.6/xfs_vnode.h +++ b/fs/xfs/linux-2.6/xfs_vnode.h @@ -25,12 +25,6 @@ struct attrlist_cursor_kern; typedef struct inode bhv_vnode_t; -#define VN_ISLNK(vp) S_ISLNK((vp)->i_mode) -#define VN_ISREG(vp) S_ISREG((vp)->i_mode) -#define VN_ISDIR(vp) S_ISDIR((vp)->i_mode) -#define VN_ISCHR(vp) S_ISCHR((vp)->i_mode) -#define VN_ISBLK(vp) S_ISBLK((vp)->i_mode) - /* * Vnode to Linux inode mapping. */ @@ -151,24 +145,6 @@ typedef struct bhv_vattr { XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\ XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT) -/* - * Modes. 
- */ -#define VSUID S_ISUID /* set user id on execution */ -#define VSGID S_ISGID /* set group id on execution */ -#define VSVTX S_ISVTX /* save swapped text even after use */ -#define VREAD S_IRUSR /* read, write, execute permissions */ -#define VWRITE S_IWUSR -#define VEXEC S_IXUSR - -#define MODEMASK S_IALLUGO /* mode bits plus permission bits */ - -/* - * Check whether mandatory file locking is enabled. - */ -#define MANDLOCK(vp, mode) \ - (VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID) - extern void vn_init(void); extern int vn_revalidate(bhv_vnode_t *); diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c index 631ebb31b295..85df3288efd5 100644 --- a/fs/xfs/quota/xfs_dquot.c +++ b/fs/xfs/quota/xfs_dquot.c @@ -933,7 +933,7 @@ xfs_qm_dqget( type == XFS_DQ_PROJ || type == XFS_DQ_GROUP); if (ip) { - ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (type == XFS_DQ_USER) ASSERT(ip->i_udquot == NULL); else @@ -1088,7 +1088,7 @@ xfs_qm_dqget( xfs_qm_mplist_unlock(mp); XFS_DQ_HASH_UNLOCK(h); dqret: - ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL)); xfs_dqtrace_entry(dqp, "DQGET DONE"); *O_dqpp = dqp; return (0); diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c index 40ea56409561..d31cce1165c5 100644 --- a/fs/xfs/quota/xfs_qm.c +++ b/fs/xfs/quota/xfs_qm.c @@ -670,7 +670,7 @@ xfs_qm_dqattach_one( xfs_dquot_t *dqp; int error; - ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); error = 0; /* * See if we already have it in the inode itself. IO_idqpp is @@ -874,7 +874,7 @@ xfs_qm_dqattach( return 0; ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 || - XFS_ISLOCKED_INODE_EXCL(ip)); + xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (! (flags & XFS_QMOPT_ILOCKED)) xfs_ilock(ip, XFS_ILOCK_EXCL); @@ -888,7 +888,8 @@ xfs_qm_dqattach( goto done; nquotas++; } - ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (XFS_IS_OQUOTA_ON(mp)) { error = XFS_IS_GQUOTA_ON(mp) ? xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP, @@ -913,7 +914,7 @@ xfs_qm_dqattach( * This WON'T, in general, result in a thrash. */ if (nquotas == 2) { - ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(ip->i_udquot); ASSERT(ip->i_gdquot); @@ -956,7 +957,7 @@ xfs_qm_dqattach( #ifdef QUOTADEBUG else - ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); #endif return error; } @@ -1291,7 +1292,7 @@ xfs_qm_dqget_noattach( xfs_mount_t *mp; xfs_dquot_t *udqp, *gdqp; - ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); mp = ip->i_mount; udqp = NULL; gdqp = NULL; @@ -1392,7 +1393,7 @@ xfs_qm_qino_alloc( * Keep an extra reference to this quota inode. This inode is * locked exclusively and joined to the transaction already. */ - ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip)); + ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL)); VN_HOLD(XFS_ITOV((*ip))); /* @@ -1737,12 +1738,6 @@ xfs_qm_dqusage_adjust( return error; } - if (ip->i_d.di_mode == 0) { - xfs_iput_new(ip, XFS_ILOCK_EXCL); - *res = BULKSTAT_RV_NOTHING; - return XFS_ERROR(ENOENT); - } - /* * Obtain the locked dquots. In case of an error (eg. allocation * fails for ENOSPC), we return the negative of the error number @@ -2563,7 +2558,7 @@ xfs_qm_vop_chown( uint bfield = XFS_IS_REALTIME_INODE(ip) ? 
XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT; - ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); /* old dquot */ @@ -2607,7 +2602,7 @@ xfs_qm_vop_chown_reserve( uint delblks, blkflags, prjflags = 0; xfs_dquot_t *unresudq, *unresgdq, *delblksudq, *delblksgdq; - ASSERT(XFS_ISLOCKED_INODE(ip)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); mp = ip->i_mount; ASSERT(XFS_IS_QUOTA_RUNNING(mp)); @@ -2717,7 +2712,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode( if (!XFS_IS_QUOTA_ON(tp->t_mountp)) return; - ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp)); if (udqp) { diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c index 8342823dbdc3..768a3b27d2b6 100644 --- a/fs/xfs/quota/xfs_qm_syscalls.c +++ b/fs/xfs/quota/xfs_qm_syscalls.c @@ -1366,12 +1366,6 @@ xfs_qm_internalqcheck_adjust( return (error); } - if (ip->i_d.di_mode == 0) { - xfs_iput_new(ip, lock_flags); - *res = BULKSTAT_RV_NOTHING; - return XFS_ERROR(ENOENT); - } - /* * This inode can have blocks after eof which can get released * when we send it to inactive. Since we don't check the dquot diff --git a/fs/xfs/quota/xfs_quota_priv.h b/fs/xfs/quota/xfs_quota_priv.h index a8b85e2be9d5..5e4a40b1c565 100644 --- a/fs/xfs/quota/xfs_quota_priv.h +++ b/fs/xfs/quota/xfs_quota_priv.h @@ -27,11 +27,6 @@ /* Number of dquots that fit in to a dquot block */ #define XFS_QM_DQPERBLK(mp) ((mp)->m_quotainfo->qi_dqperchunk) -#define XFS_ISLOCKED_INODE(ip) (ismrlocked(&(ip)->i_lock, \ - MR_UPDATE | MR_ACCESS) != 0) -#define XFS_ISLOCKED_INODE_EXCL(ip) (ismrlocked(&(ip)->i_lock, \ - MR_UPDATE) != 0) - #define XFS_DQ_IS_ADDEDTO_TRX(t, d) ((d)->q_transp == (t)) #define XFS_QI_MPLRECLAIMS(mp) ((mp)->m_quotainfo->qi_dqreclaims) diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c index f441f836ca8b..99611381e740 100644 --- a/fs/xfs/quota/xfs_trans_dquot.c +++ b/fs/xfs/quota/xfs_trans_dquot.c @@ -834,7 +834,7 @@ xfs_trans_reserve_quota_nblks( ASSERT(ip->i_ino != mp->m_sb.sb_uquotino); ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); - ASSERT(XFS_ISLOCKED_INODE_EXCL(ip)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount)); ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == XFS_TRANS_DQ_RES_RTBLKS || diff --git a/fs/xfs/xfs.h b/fs/xfs/xfs.h index 765aaf65e2d3..540e4c989825 100644 --- a/fs/xfs/xfs.h +++ b/fs/xfs/xfs.h @@ -22,7 +22,7 @@ #define STATIC #define DEBUG 1 #define XFS_BUF_LOCK_TRACKING 1 -#define QUOTADEBUG 1 +/* #define QUOTADEBUG 1 */ #endif #ifdef CONFIG_XFS_TRACE diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 8e130b9720ae..ebee3a4f703a 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c @@ -72,7 +72,7 @@ xfs_acl_vhasacl_default( { int error; - if (!VN_ISDIR(vp)) + if (!S_ISDIR(vp->i_mode)) return 0; xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error); return (error == 0); @@ -238,15 +238,8 @@ xfs_acl_vget( error = EINVAL; goto out; } - if (kind == _ACL_TYPE_ACCESS) { - bhv_vattr_t va; - - va.va_mask = XFS_AT_MODE; - error = xfs_getattr(xfs_vtoi(vp), &va, 0); - if (error) - goto out; - xfs_acl_sync_mode(va.va_mode, xfs_acl); - } + if (kind == _ACL_TYPE_ACCESS) + xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, xfs_acl); error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size); } out: @@ -341,14 +334,15 @@ xfs_acl_iaccess( { xfs_acl_t *acl; int rval; + struct xfs_name acl_name = 
{SGI_ACL_FILE, SGI_ACL_FILE_SIZE}; if (!(_ACL_ALLOC(acl))) return -1; /* If the file has no ACL return -1. */ rval = sizeof(xfs_acl_t); - if (xfs_attr_fetch(ip, SGI_ACL_FILE, SGI_ACL_FILE_SIZE, - (char *)acl, &rval, ATTR_ROOT | ATTR_KERNACCESS, cr)) { + if (xfs_attr_fetch(ip, &acl_name, (char *)acl, &rval, + ATTR_ROOT | ATTR_KERNACCESS)) { _ACL_FREE(acl); return -1; } @@ -373,23 +367,15 @@ xfs_acl_allow_set( bhv_vnode_t *vp, int kind) { - xfs_inode_t *ip = xfs_vtoi(vp); - bhv_vattr_t va; - int error; - if (vp->i_flags & (S_IMMUTABLE|S_APPEND)) return EPERM; - if (kind == _ACL_TYPE_DEFAULT && !VN_ISDIR(vp)) + if (kind == _ACL_TYPE_DEFAULT && !S_ISDIR(vp->i_mode)) return ENOTDIR; if (vp->i_sb->s_flags & MS_RDONLY) return EROFS; - va.va_mask = XFS_AT_UID; - error = xfs_getattr(ip, &va, 0); - if (error) - return error; - if (va.va_uid != current->fsuid && !capable(CAP_FOWNER)) + if (xfs_vtoi(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER)) return EPERM; - return error; + return 0; } /* @@ -594,7 +580,7 @@ xfs_acl_get_attr( *error = xfs_attr_get(xfs_vtoi(vp), kind == _ACL_TYPE_ACCESS ? SGI_ACL_FILE : SGI_ACL_DEFAULT, - (char *)aclp, &len, flags, sys_cred); + (char *)aclp, &len, flags); if (*error || (flags & ATTR_KERNOVAL)) return; xfs_acl_get_endian(aclp); @@ -643,7 +629,6 @@ xfs_acl_vtoacl( xfs_acl_t *access_acl, xfs_acl_t *default_acl) { - bhv_vattr_t va; int error = 0; if (access_acl) { @@ -652,16 +637,10 @@ xfs_acl_vtoacl( * be obtained for some reason, invalidate the access ACL. */ xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error); - if (!error) { - /* Got the ACL, need the mode... */ - va.va_mask = XFS_AT_MODE; - error = xfs_getattr(xfs_vtoi(vp), &va, 0); - } - if (error) access_acl->acl_cnt = XFS_ACL_NOT_PRESENT; else /* We have a good ACL and the file mode, synchronize. */ - xfs_acl_sync_mode(va.va_mode, access_acl); + xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, access_acl); } if (default_acl) { @@ -719,7 +698,7 @@ xfs_acl_inherit( * If the new file is a directory, its default ACL is a copy of * the containing directory's default ACL. */ - if (VN_ISDIR(vp)) + if (S_ISDIR(vp->i_mode)) xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error); if (!error && !basicperms) xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error); @@ -744,7 +723,7 @@ xfs_acl_setmode( bhv_vattr_t va; xfs_acl_entry_t *ap; xfs_acl_entry_t *gap = NULL; - int i, error, nomask = 1; + int i, nomask = 1; *basicperms = 1; @@ -756,11 +735,7 @@ xfs_acl_setmode( * mode. The m:: bits take precedence over the g:: bits. */ va.va_mask = XFS_AT_MODE; - error = xfs_getattr(xfs_vtoi(vp), &va, 0); - if (error) - return error; - - va.va_mask = XFS_AT_MODE; + va.va_mode = xfs_vtoi(vp)->i_d.di_mode; va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO); ap = acl->acl_entry; for (i = 0; i < acl->acl_cnt; ++i) { diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c index 36d781ee5fcc..df151a859186 100644 --- a/fs/xfs/xfs_attr.c +++ b/fs/xfs/xfs_attr.c @@ -101,14 +101,28 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args); ktrace_t *xfs_attr_trace_buf; #endif +STATIC int +xfs_attr_name_to_xname( + struct xfs_name *xname, + const char *aname) +{ + if (!aname) + return EINVAL; + xname->name = aname; + xname->len = strlen(aname); + if (xname->len >= MAXNAMELEN) + return EFAULT; /* match IRIX behaviour */ + + return 0; +} /*======================================================================== * Overall external interface routines. 
*========================================================================*/ int -xfs_attr_fetch(xfs_inode_t *ip, const char *name, int namelen, - char *value, int *valuelenp, int flags, struct cred *cred) +xfs_attr_fetch(xfs_inode_t *ip, struct xfs_name *name, + char *value, int *valuelenp, int flags) { xfs_da_args_t args; int error; @@ -122,8 +136,8 @@ xfs_attr_fetch(xfs_inode_t *ip, const char *name, int namelen, * Fill in the arg structure for this request. */ memset((char *)&args, 0, sizeof(args)); - args.name = name; - args.namelen = namelen; + args.name = name->name; + args.namelen = name->len; args.value = value; args.valuelen = *valuelenp; args.flags = flags; @@ -162,31 +176,29 @@ xfs_attr_get( const char *name, char *value, int *valuelenp, - int flags, - cred_t *cred) + int flags) { - int error, namelen; + int error; + struct xfs_name xname; XFS_STATS_INC(xs_attr_get); - if (!name) - return(EINVAL); - namelen = strlen(name); - if (namelen >= MAXNAMELEN) - return(EFAULT); /* match IRIX behaviour */ - if (XFS_FORCED_SHUTDOWN(ip->i_mount)) return(EIO); + error = xfs_attr_name_to_xname(&xname, name); + if (error) + return error; + xfs_ilock(ip, XFS_ILOCK_SHARED); - error = xfs_attr_fetch(ip, name, namelen, value, valuelenp, flags, cred); + error = xfs_attr_fetch(ip, &xname, value, valuelenp, flags); xfs_iunlock(ip, XFS_ILOCK_SHARED); return(error); } -int -xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen, - char *value, int valuelen, int flags) +STATIC int +xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name, + char *value, int valuelen, int flags) { xfs_da_args_t args; xfs_fsblock_t firstblock; @@ -209,7 +221,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen, */ if (XFS_IFORK_Q(dp) == 0) { int sf_size = sizeof(xfs_attr_sf_hdr_t) + - XFS_ATTR_SF_ENTSIZE_BYNAME(namelen, valuelen); + XFS_ATTR_SF_ENTSIZE_BYNAME(name->len, valuelen); if ((error = xfs_bmap_add_attrfork(dp, sf_size, rsvd))) return(error); @@ -219,8 +231,8 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen, * Fill in the arg structure for this request. */ memset((char *)&args, 0, sizeof(args)); - args.name = name; - args.namelen = namelen; + args.name = name->name; + args.namelen = name->len; args.value = value; args.valuelen = valuelen; args.flags = flags; @@ -236,7 +248,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen, * Determine space new attribute will use, and if it would be * "local" or "remote" (note: local != inline). */ - size = xfs_attr_leaf_newentsize(namelen, valuelen, + size = xfs_attr_leaf_newentsize(name->len, valuelen, mp->m_sb.sb_blocksize, &local); nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK); @@ -429,26 +441,27 @@ xfs_attr_set( int valuelen, int flags) { - int namelen; - - namelen = strlen(name); - if (namelen >= MAXNAMELEN) - return EFAULT; /* match IRIX behaviour */ + int error; + struct xfs_name xname; XFS_STATS_INC(xs_attr_set); if (XFS_FORCED_SHUTDOWN(dp->i_mount)) return (EIO); - return xfs_attr_set_int(dp, name, namelen, value, valuelen, flags); + error = xfs_attr_name_to_xname(&xname, name); + if (error) + return error; + + return xfs_attr_set_int(dp, &xname, value, valuelen, flags); } /* * Generic handler routine to remove a name from an attribute list. * Transitions attribute list from Btree to shortform as necessary. 
*/ -int -xfs_attr_remove_int(xfs_inode_t *dp, const char *name, int namelen, int flags) +STATIC int +xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags) { xfs_da_args_t args; xfs_fsblock_t firstblock; @@ -460,8 +473,8 @@ xfs_attr_remove_int(xfs_inode_t *dp, const char *name, int namelen, int flags) * Fill in the arg structure for this request. */ memset((char *)&args, 0, sizeof(args)); - args.name = name; - args.namelen = namelen; + args.name = name->name; + args.namelen = name->len; args.flags = flags; args.hashval = xfs_da_hashname(args.name, args.namelen); args.dp = dp; @@ -575,17 +588,18 @@ xfs_attr_remove( const char *name, int flags) { - int namelen; - - namelen = strlen(name); - if (namelen >= MAXNAMELEN) - return EFAULT; /* match IRIX behaviour */ + int error; + struct xfs_name xname; XFS_STATS_INC(xs_attr_remove); if (XFS_FORCED_SHUTDOWN(dp->i_mount)) return (EIO); + error = xfs_attr_name_to_xname(&xname, name); + if (error) + return error; + xfs_ilock(dp, XFS_ILOCK_SHARED); if (XFS_IFORK_Q(dp) == 0 || (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS && @@ -595,10 +609,10 @@ xfs_attr_remove( } xfs_iunlock(dp, XFS_ILOCK_SHARED); - return xfs_attr_remove_int(dp, name, namelen, flags); + return xfs_attr_remove_int(dp, &xname, flags); } -int /* error */ +STATIC int xfs_attr_list_int(xfs_attr_list_context_t *context) { int error; @@ -2522,8 +2536,7 @@ attr_generic_get( { int error, asize = size; - error = xfs_attr_get(xfs_vtoi(vp), name, data, - &asize, xflags, NULL); + error = xfs_attr_get(xfs_vtoi(vp), name, data, &asize, xflags); if (!error) return asize; return -error; diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h index 786eba3121c4..6cfc9384fe35 100644 --- a/fs/xfs/xfs_attr.h +++ b/fs/xfs/xfs_attr.h @@ -158,14 +158,10 @@ struct xfs_da_args; /* * Overall external interface routines. 
*/ -int xfs_attr_set_int(struct xfs_inode *, const char *, int, char *, int, int); -int xfs_attr_remove_int(struct xfs_inode *, const char *, int, int); -int xfs_attr_list_int(struct xfs_attr_list_context *); int xfs_attr_inactive(struct xfs_inode *dp); int xfs_attr_shortform_getvalue(struct xfs_da_args *); -int xfs_attr_fetch(struct xfs_inode *, const char *, int, - char *, int *, int, struct cred *); +int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int); int xfs_attr_rmtval_get(struct xfs_da_args *args); #endif /* __XFS_ATTR_H__ */ diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index eb198c01c35d..53c259f5a5af 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c @@ -4074,7 +4074,6 @@ xfs_bmap_add_attrfork( error2: xfs_bmap_cancel(&flist); error1: - ASSERT(ismrlocked(&ip->i_lock,MR_UPDATE)); xfs_iunlock(ip, XFS_ILOCK_EXCL); error0: xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT); diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c index 3f53fad356a3..5f3647cb9885 100644 --- a/fs/xfs/xfs_dfrag.c +++ b/fs/xfs/xfs_dfrag.c @@ -162,7 +162,7 @@ xfs_swap_extents( ips[1] = ip; } - xfs_lock_inodes(ips, 2, 0, lock_flags); + xfs_lock_inodes(ips, 2, lock_flags); locked = 1; /* Verify that both files have the same format */ @@ -265,7 +265,7 @@ xfs_swap_extents( locked = 0; goto error0; } - xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); + xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); /* * Count the number of extended attribute blocks diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index d3a0f538d6a6..381ebda4f7bc 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -462,7 +462,7 @@ xfs_fs_counts( xfs_mount_t *mp, xfs_fsop_counts_t *cnt) { - xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT); + xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT); spin_lock(&mp->m_sb_lock); cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp); cnt->freertx = mp->m_sb.sb_frextents; @@ -524,7 +524,7 @@ xfs_reserve_blocks( */ retry: spin_lock(&mp->m_sb_lock); - xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED); + xfs_icsb_sync_counters_locked(mp, 0); /* * If our previous reservation was larger than the current value, @@ -552,11 +552,8 @@ retry: mp->m_resblks += free; mp->m_resblks_avail += free; fdblks_delta = -free; - mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp); } else { fdblks_delta = -delta; - mp->m_sb.sb_fdblocks = - lcounter + XFS_ALLOC_SET_ASIDE(mp); mp->m_resblks = request; mp->m_resblks_avail += delta; } @@ -587,7 +584,6 @@ out: if (error == ENOSPC) goto retry; } - return 0; } diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c index a64dfbd565a5..aad8c5da38af 100644 --- a/fs/xfs/xfs_ialloc.c +++ b/fs/xfs/xfs_ialloc.c @@ -147,6 +147,7 @@ xfs_ialloc_ag_alloc( int version; /* inode version number to use */ int isaligned = 0; /* inode allocation at stripe unit */ /* boundary */ + unsigned int gen; args.tp = tp; args.mp = tp->t_mountp; @@ -290,6 +291,14 @@ xfs_ialloc_ag_alloc( else version = XFS_DINODE_VERSION_1; + /* + * Seed the new inode cluster with a random generation number. This + * prevents short-term reuse of generation numbers if a chunk is + * freed and then immediately reallocated. We use random numbers + * rather than a linear progression to prevent the next generation + * number from being easily guessable. + */ + gen = random32(); for (j = 0; j < nbufs; j++) { /* * Get the block. 
@@ -309,6 +318,7 @@ xfs_ialloc_ag_alloc( free = XFS_MAKE_IPTR(args.mp, fbuf, i); free->di_core.di_magic = cpu_to_be16(XFS_DINODE_MAGIC); free->di_core.di_version = version; + free->di_core.di_gen = cpu_to_be32(gen); free->di_next_unlinked = cpu_to_be32(NULLAGINO); xfs_ialloc_log_di(tp, fbuf, i, XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED); diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index e657c5128460..b07604b94d9f 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c @@ -593,8 +593,9 @@ xfs_iunlock_map_shared( * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL */ void -xfs_ilock(xfs_inode_t *ip, - uint lock_flags) +xfs_ilock( + xfs_inode_t *ip, + uint lock_flags) { /* * You can't set both SHARED and EXCL for the same lock, @@ -607,16 +608,16 @@ xfs_ilock(xfs_inode_t *ip, (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); - if (lock_flags & XFS_IOLOCK_EXCL) { + if (lock_flags & XFS_IOLOCK_EXCL) mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags)); - } else if (lock_flags & XFS_IOLOCK_SHARED) { + else if (lock_flags & XFS_IOLOCK_SHARED) mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags)); - } - if (lock_flags & XFS_ILOCK_EXCL) { + + if (lock_flags & XFS_ILOCK_EXCL) mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); - } else if (lock_flags & XFS_ILOCK_SHARED) { + else if (lock_flags & XFS_ILOCK_SHARED) mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); - } + xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address); } @@ -631,15 +632,12 @@ xfs_ilock(xfs_inode_t *ip, * lock_flags -- this parameter indicates the inode's locks to be * to be locked. See the comment for xfs_ilock() for a list * of valid values. - * */ int -xfs_ilock_nowait(xfs_inode_t *ip, - uint lock_flags) +xfs_ilock_nowait( + xfs_inode_t *ip, + uint lock_flags) { - int iolocked; - int ilocked; - /* * You can't set both SHARED and EXCL for the same lock, * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, @@ -651,37 +649,30 @@ xfs_ilock_nowait(xfs_inode_t *ip, (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); - iolocked = 0; if (lock_flags & XFS_IOLOCK_EXCL) { - iolocked = mrtryupdate(&ip->i_iolock); - if (!iolocked) { - return 0; - } + if (!mrtryupdate(&ip->i_iolock)) + goto out; } else if (lock_flags & XFS_IOLOCK_SHARED) { - iolocked = mrtryaccess(&ip->i_iolock); - if (!iolocked) { - return 0; - } + if (!mrtryaccess(&ip->i_iolock)) + goto out; } if (lock_flags & XFS_ILOCK_EXCL) { - ilocked = mrtryupdate(&ip->i_lock); - if (!ilocked) { - if (iolocked) { - mrunlock(&ip->i_iolock); - } - return 0; - } + if (!mrtryupdate(&ip->i_lock)) + goto out_undo_iolock; } else if (lock_flags & XFS_ILOCK_SHARED) { - ilocked = mrtryaccess(&ip->i_lock); - if (!ilocked) { - if (iolocked) { - mrunlock(&ip->i_iolock); - } - return 0; - } + if (!mrtryaccess(&ip->i_lock)) + goto out_undo_iolock; } xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address); return 1; + + out_undo_iolock: + if (lock_flags & XFS_IOLOCK_EXCL) + mrunlock_excl(&ip->i_iolock); + else if (lock_flags & XFS_IOLOCK_SHARED) + mrunlock_shared(&ip->i_iolock); + out: + return 0; } /* @@ -697,8 +688,9 @@ xfs_ilock_nowait(xfs_inode_t *ip, * */ void -xfs_iunlock(xfs_inode_t *ip, - uint lock_flags) +xfs_iunlock( + xfs_inode_t *ip, + uint lock_flags) { /* * You can't set both SHARED and EXCL for the same lock, @@ -713,31 +705,25 @@ xfs_iunlock(xfs_inode_t *ip, XFS_LOCK_DEP_MASK)) == 0); ASSERT(lock_flags != 0); - if (lock_flags & 
(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) { - ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) || - (ismrlocked(&ip->i_iolock, MR_ACCESS))); - ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) || - (ismrlocked(&ip->i_iolock, MR_UPDATE))); - mrunlock(&ip->i_iolock); - } + if (lock_flags & XFS_IOLOCK_EXCL) + mrunlock_excl(&ip->i_iolock); + else if (lock_flags & XFS_IOLOCK_SHARED) + mrunlock_shared(&ip->i_iolock); - if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) { - ASSERT(!(lock_flags & XFS_ILOCK_SHARED) || - (ismrlocked(&ip->i_lock, MR_ACCESS))); - ASSERT(!(lock_flags & XFS_ILOCK_EXCL) || - (ismrlocked(&ip->i_lock, MR_UPDATE))); - mrunlock(&ip->i_lock); + if (lock_flags & XFS_ILOCK_EXCL) + mrunlock_excl(&ip->i_lock); + else if (lock_flags & XFS_ILOCK_SHARED) + mrunlock_shared(&ip->i_lock); + if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) && + !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) { /* * Let the AIL know that this item has been unlocked in case * it is in the AIL and anyone is waiting on it. Don't do * this if the caller has asked us not to. */ - if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) && - ip->i_itemp != NULL) { - xfs_trans_unlocked_item(ip->i_mount, - (xfs_log_item_t*)(ip->i_itemp)); - } + xfs_trans_unlocked_item(ip->i_mount, + (xfs_log_item_t*)(ip->i_itemp)); } xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address); } @@ -747,21 +733,47 @@ xfs_iunlock(xfs_inode_t *ip, * if it is being demoted. */ void -xfs_ilock_demote(xfs_inode_t *ip, - uint lock_flags) +xfs_ilock_demote( + xfs_inode_t *ip, + uint lock_flags) { ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0); - if (lock_flags & XFS_ILOCK_EXCL) { - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + if (lock_flags & XFS_ILOCK_EXCL) mrdemote(&ip->i_lock); - } - if (lock_flags & XFS_IOLOCK_EXCL) { - ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); + if (lock_flags & XFS_IOLOCK_EXCL) mrdemote(&ip->i_iolock); +} + +#ifdef DEBUG +/* + * Debug-only routine, without additional rw_semaphore APIs, we can + * now only answer requests regarding whether we hold the lock for write + * (reader state is outside our visibility, we only track writer state). + * + * Note: this means !xfs_isilocked would give false positives, so don't do that. 
+ */ +int +xfs_isilocked( + xfs_inode_t *ip, + uint lock_flags) +{ + if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) == + XFS_ILOCK_EXCL) { + if (!ip->i_lock.mr_writer) + return 0; } + + if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) == + XFS_IOLOCK_EXCL) { + if (!ip->i_iolock.mr_writer) + return 0; + } + + return 1; } +#endif /* * The following three routines simply manage the i_flock diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index ca12acb90394..cf0bb9c1d621 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -1291,7 +1291,7 @@ xfs_file_last_byte( xfs_fileoff_t size_last_block; int error; - ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS)); + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)); mp = ip->i_mount; /* @@ -1402,7 +1402,7 @@ xfs_itruncate_start( bhv_vnode_t *vp; int error = 0; - ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0); + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); ASSERT((new_size == 0) || (new_size <= ip->i_size)); ASSERT((flags == XFS_ITRUNC_DEFINITE) || (flags == XFS_ITRUNC_MAYBE)); @@ -1528,8 +1528,7 @@ xfs_itruncate_finish( xfs_bmap_free_t free_list; int error; - ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0); - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); ASSERT((new_size == 0) || (new_size <= ip->i_size)); ASSERT(*tp != NULL); ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); @@ -1780,8 +1779,7 @@ xfs_igrow_start( xfs_fsize_t new_size, cred_t *credp) { - ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); - ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); ASSERT(new_size > ip->i_size); /* @@ -1809,8 +1807,7 @@ xfs_igrow_finish( xfs_fsize_t new_size, int change_flag) { - ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0); - ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL)); ASSERT(ip->i_transp == tp); ASSERT(new_size > ip->i_size); @@ -2287,7 +2284,7 @@ xfs_ifree( xfs_dinode_t *dip; xfs_buf_t *ibp; - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(ip->i_transp == tp); ASSERT(ip->i_d.di_nlink == 0); ASSERT(ip->i_d.di_nextents == 0); @@ -2746,7 +2743,7 @@ void xfs_ipin( xfs_inode_t *ip) { - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); atomic_inc(&ip->i_pincount); } @@ -2779,7 +2776,7 @@ __xfs_iunpin_wait( { xfs_inode_log_item_t *iip = ip->i_itemp; - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); if (atomic_read(&ip->i_pincount) == 0) return; @@ -2829,7 +2826,7 @@ xfs_iextents_copy( xfs_fsblock_t start_block; ifp = XFS_IFORK_PTR(ip, whichfork); - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(ifp->if_bytes > 0); nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); @@ -3132,7 +3129,7 @@ xfs_iflush( XFS_STATS_INC(xs_iflush_count); - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(issemalocked(&(ip->i_flock))); ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || ip->i_d.di_nextents > ip->i_df.if_ext_max); @@ -3297,7 +3294,7 @@ xfs_iflush_int( int first; #endif - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); ASSERT(issemalocked(&(ip->i_flock))); 
ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE || ip->i_d.di_nextents > ip->i_df.if_ext_max); diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 93c37697a72c..0a999fee4f03 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -386,20 +386,9 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags) #define XFS_ILOCK_EXCL (1<<2) #define XFS_ILOCK_SHARED (1<<3) #define XFS_IUNLOCK_NONOTIFY (1<<4) -/* #define XFS_IOLOCK_NESTED (1<<5) */ -#define XFS_EXTENT_TOKEN_RD (1<<6) -#define XFS_SIZE_TOKEN_RD (1<<7) -#define XFS_EXTSIZE_RD (XFS_EXTENT_TOKEN_RD|XFS_SIZE_TOKEN_RD) -#define XFS_WILLLEND (1<<8) /* Always acquire tokens for lending */ -#define XFS_EXTENT_TOKEN_WR (XFS_EXTENT_TOKEN_RD | XFS_WILLLEND) -#define XFS_SIZE_TOKEN_WR (XFS_SIZE_TOKEN_RD | XFS_WILLLEND) -#define XFS_EXTSIZE_WR (XFS_EXTSIZE_RD | XFS_WILLLEND) -/* TODO:XFS_SIZE_TOKEN_WANT (1<<9) */ #define XFS_LOCK_MASK (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \ - | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED \ - | XFS_EXTENT_TOKEN_RD | XFS_SIZE_TOKEN_RD \ - | XFS_WILLLEND) + | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED) /* * Flags for lockdep annotations. @@ -483,6 +472,7 @@ void xfs_ilock(xfs_inode_t *, uint); int xfs_ilock_nowait(xfs_inode_t *, uint); void xfs_iunlock(xfs_inode_t *, uint); void xfs_ilock_demote(xfs_inode_t *, uint); +int xfs_isilocked(xfs_inode_t *, uint); void xfs_iflock(xfs_inode_t *); int xfs_iflock_nowait(xfs_inode_t *); uint xfs_ilock_map_shared(xfs_inode_t *); @@ -534,7 +524,7 @@ int xfs_iflush(xfs_inode_t *, uint); void xfs_iflush_all(struct xfs_mount *); void xfs_ichgtime(xfs_inode_t *, int); xfs_fsize_t xfs_file_last_byte(xfs_inode_t *); -void xfs_lock_inodes(xfs_inode_t **, int, int, uint); +void xfs_lock_inodes(xfs_inode_t **, int, uint); void xfs_synchronize_atime(xfs_inode_t *); void xfs_mark_inode_dirty_sync(xfs_inode_t *); diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 93b5db453ea2..167b33f15772 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -547,7 +547,7 @@ STATIC void xfs_inode_item_pin( xfs_inode_log_item_t *iip) { - ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE)); + ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL)); xfs_ipin(iip->ili_inode); } @@ -664,13 +664,13 @@ xfs_inode_item_unlock( ASSERT(iip != NULL); ASSERT(iip->ili_inode->i_itemp != NULL); - ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE)); + ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL)); ASSERT((!(iip->ili_inode->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL)) || - ismrlocked(&(iip->ili_inode->i_iolock), MR_UPDATE)); + xfs_isilocked(iip->ili_inode, XFS_IOLOCK_EXCL)); ASSERT((!(iip->ili_inode->i_itemp->ili_flags & XFS_ILI_IOLOCKED_SHARED)) || - ismrlocked(&(iip->ili_inode->i_iolock), MR_ACCESS)); + xfs_isilocked(iip->ili_inode, XFS_IOLOCK_SHARED)); /* * Clear the transaction pointer in the inode. 
*/ @@ -769,7 +769,7 @@ xfs_inode_item_pushbuf( ip = iip->ili_inode; - ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); /* * The ili_pushbuf_flag keeps others from @@ -857,7 +857,7 @@ xfs_inode_item_push( ip = iip->ili_inode; - ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); ASSERT(issemalocked(&(ip->i_flock))); /* * Since we were able to lock the inode's flush lock and diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index fb3cf1191419..7edcde691d1a 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -196,14 +196,14 @@ xfs_iomap( break; case BMAPI_WRITE: xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count); - lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR; + lockmode = XFS_ILOCK_EXCL; if (flags & BMAPI_IGNSTATE) bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE; xfs_ilock(ip, lockmode); break; case BMAPI_ALLOCATE: xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count); - lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD; + lockmode = XFS_ILOCK_SHARED; bmapi_flags = XFS_BMAPI_ENTIRE; /* Attempt non-blocking lock */ @@ -523,8 +523,7 @@ xfs_iomap_write_direct( goto error_out; } - if (unlikely(!imap.br_startblock && - !(XFS_IS_REALTIME_INODE(ip)))) { + if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) { error = xfs_cmn_err_fsblock_zero(ip, &imap); goto error_out; } @@ -624,7 +623,7 @@ xfs_iomap_write_delay( int prealloc, fsynced = 0; int error; - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); /* * Make sure that the dquots are there. This doesn't hold @@ -686,8 +685,7 @@ retry: goto retry; } - if (unlikely(!imap[0].br_startblock && - !(XFS_IS_REALTIME_INODE(ip)))) + if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip))) return xfs_cmn_err_fsblock_zero(ip, &imap[0]); *ret_imap = imap[0]; @@ -838,9 +836,9 @@ xfs_iomap_write_allocate( * See if we were able to allocate an extent that * covers at least part of the callers request */ - if (unlikely(!imap.br_startblock && - XFS_IS_REALTIME_INODE(ip))) + if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) return xfs_cmn_err_fsblock_zero(ip, &imap); + if ((offset_fsb >= imap.br_startoff) && (offset_fsb < (imap.br_startoff + imap.br_blockcount))) { @@ -934,8 +932,7 @@ xfs_iomap_write_unwritten( if (error) return XFS_ERROR(error); - if (unlikely(!imap.br_startblock && - !(XFS_IS_REALTIME_INODE(ip)))) + if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) return xfs_cmn_err_fsblock_zero(ip, &imap); if ((numblks_fsb = imap.br_blockcount) == 0) { diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c index eb85bdedad0c..419de15aeb43 100644 --- a/fs/xfs/xfs_itable.c +++ b/fs/xfs/xfs_itable.c @@ -71,11 +71,6 @@ xfs_bulkstat_one_iget( ASSERT(ip != NULL); ASSERT(ip->i_blkno != (xfs_daddr_t)0); - if (ip->i_d.di_mode == 0) { - *stat = BULKSTAT_RV_NOTHING; - error = XFS_ERROR(ENOENT); - goto out_iput; - } vp = XFS_ITOV(ip); dic = &ip->i_d; @@ -124,7 +119,6 @@ xfs_bulkstat_one_iget( break; } - out_iput: xfs_iput(ip, XFS_ILOCK_SHARED); return error; } diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index 2fec452afbcc..da3988453b71 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -54,8 +54,9 @@ STATIC void xfs_unmountfs_wait(xfs_mount_t *); #ifdef HAVE_PERCPU_SB STATIC void xfs_icsb_destroy_counters(xfs_mount_t *); STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t, - int, int); -STATIC void xfs_icsb_sync_counters(xfs_mount_t *); + int); +STATIC void 
xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t, + int); STATIC int xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t, int64_t, int); STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); @@ -63,8 +64,8 @@ STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t); #else #define xfs_icsb_destroy_counters(mp) do { } while (0) -#define xfs_icsb_balance_counter(mp, a, b, c) do { } while (0) -#define xfs_icsb_sync_counters(mp) do { } while (0) +#define xfs_icsb_balance_counter(mp, a, b) do { } while (0) +#define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0) #define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0) #endif @@ -1400,7 +1401,7 @@ xfs_log_sbcount( if (!xfs_fs_writable(mp)) return 0; - xfs_icsb_sync_counters(mp); + xfs_icsb_sync_counters(mp, 0); /* * we don't need to do this if we are updating the superblock @@ -2026,9 +2027,9 @@ xfs_icsb_cpu_notify( case CPU_ONLINE: case CPU_ONLINE_FROZEN: xfs_icsb_lock(mp); - xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); xfs_icsb_unlock(mp); break; case CPU_DEAD: @@ -2048,12 +2049,9 @@ xfs_icsb_cpu_notify( memset(cntp, 0, sizeof(xfs_icsb_cnts_t)); - xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, - XFS_ICSB_SB_LOCKED, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, - XFS_ICSB_SB_LOCKED, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, - XFS_ICSB_SB_LOCKED, 0); + xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0); + xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0); + xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0); spin_unlock(&mp->m_sb_lock); xfs_icsb_unlock(mp); break; @@ -2105,9 +2103,9 @@ xfs_icsb_reinit_counters( * initial balance kicks us off correctly */ mp->m_icsb_counters = -1; - xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0); - xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0); + xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0); xfs_icsb_unlock(mp); } @@ -2223,7 +2221,7 @@ xfs_icsb_disable_counter( if (!test_and_set_bit(field, &mp->m_icsb_counters)) { /* drain back to superblock */ - xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT); + xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT); switch(field) { case XFS_SBS_ICOUNT: mp->m_sb.sb_icount = cnt.icsb_icount; @@ -2278,38 +2276,33 @@ xfs_icsb_enable_counter( } void -xfs_icsb_sync_counters_flags( +xfs_icsb_sync_counters_locked( xfs_mount_t *mp, int flags) { xfs_icsb_cnts_t cnt; - /* Pass 1: lock all counters */ - if ((flags & XFS_ICSB_SB_LOCKED) == 0) - spin_lock(&mp->m_sb_lock); - xfs_icsb_count(mp, &cnt, flags); - /* Step 3: update mp->m_sb fields */ if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT)) mp->m_sb.sb_icount = cnt.icsb_icount; if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE)) mp->m_sb.sb_ifree = cnt.icsb_ifree; if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS)) mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks; - - if ((flags & XFS_ICSB_SB_LOCKED) == 0) - spin_unlock(&mp->m_sb_lock); } /* * Accurate update of per-cpu counters to incore superblock */ -STATIC void +void xfs_icsb_sync_counters( - xfs_mount_t *mp) + xfs_mount_t *mp, + int flags) { - 
xfs_icsb_sync_counters_flags(mp, 0); + spin_lock(&mp->m_sb_lock); + xfs_icsb_sync_counters_locked(mp, flags); + spin_unlock(&mp->m_sb_lock); } /* @@ -2332,19 +2325,15 @@ xfs_icsb_sync_counters( #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \ (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp)) STATIC void -xfs_icsb_balance_counter( +xfs_icsb_balance_counter_locked( xfs_mount_t *mp, xfs_sb_field_t field, - int flags, int min_per_cpu) { uint64_t count, resid; int weight = num_online_cpus(); uint64_t min = (uint64_t)min_per_cpu; - if (!(flags & XFS_ICSB_SB_LOCKED)) - spin_lock(&mp->m_sb_lock); - /* disable counter and sync counter */ xfs_icsb_disable_counter(mp, field); @@ -2354,19 +2343,19 @@ xfs_icsb_balance_counter( count = mp->m_sb.sb_icount; resid = do_div(count, weight); if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) - goto out; + return; break; case XFS_SBS_IFREE: count = mp->m_sb.sb_ifree; resid = do_div(count, weight); if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE)) - goto out; + return; break; case XFS_SBS_FDBLOCKS: count = mp->m_sb.sb_fdblocks; resid = do_div(count, weight); if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp))) - goto out; + return; break; default: BUG(); @@ -2375,9 +2364,17 @@ xfs_icsb_balance_counter( } xfs_icsb_enable_counter(mp, field, count, resid); -out: - if (!(flags & XFS_ICSB_SB_LOCKED)) - spin_unlock(&mp->m_sb_lock); +} + +STATIC void +xfs_icsb_balance_counter( + xfs_mount_t *mp, + xfs_sb_field_t fields, + int min_per_cpu) +{ + spin_lock(&mp->m_sb_lock); + xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu); + spin_unlock(&mp->m_sb_lock); } STATIC int @@ -2484,7 +2481,7 @@ slow_path: * we are done. */ if (ret != ENOSPC) - xfs_icsb_balance_counter(mp, field, 0, 0); + xfs_icsb_balance_counter(mp, field, 0); xfs_icsb_unlock(mp); return ret; @@ -2508,7 +2505,7 @@ balance_counter: * will either succeed through the fast path or slow path without * another balance operation being required. */ - xfs_icsb_balance_counter(mp, field, 0, delta); + xfs_icsb_balance_counter(mp, field, delta); xfs_icsb_unlock(mp); goto again; } diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 1ed575110ff0..63e0693a358a 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -206,17 +206,18 @@ typedef struct xfs_icsb_cnts { #define XFS_ICSB_FLAG_LOCK (1 << 0) /* counter lock bit */ -#define XFS_ICSB_SB_LOCKED (1 << 0) /* sb already locked */ #define XFS_ICSB_LAZY_COUNT (1 << 1) /* accuracy not needed */ extern int xfs_icsb_init_counters(struct xfs_mount *); extern void xfs_icsb_reinit_counters(struct xfs_mount *); -extern void xfs_icsb_sync_counters_flags(struct xfs_mount *, int); +extern void xfs_icsb_sync_counters(struct xfs_mount *, int); +extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int); #else #define xfs_icsb_init_counters(mp) (0) #define xfs_icsb_reinit_counters(mp) do { } while (0) -#define xfs_icsb_sync_counters_flags(mp, flags) do { } while (0) +#define xfs_icsb_sync_counters(mp, flags) do { } while (0) +#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0) #endif typedef struct xfs_ail { diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c index ee371890d85d..d8063e1ad298 100644 --- a/fs/xfs/xfs_rename.c +++ b/fs/xfs/xfs_rename.c @@ -55,85 +55,32 @@ xfs_rename_unlock4( xfs_iunlock(i_tab[0], lock_mode); for (i = 1; i < 4; i++) { - if (i_tab[i] == NULL) { + if (i_tab[i] == NULL) break; - } + /* * Watch out for duplicate entries in the table. 
*/ - if (i_tab[i] != i_tab[i-1]) { + if (i_tab[i] != i_tab[i-1]) xfs_iunlock(i_tab[i], lock_mode); - } } } -#ifdef DEBUG -int xfs_rename_skip, xfs_rename_nskip; -#endif - /* - * The following routine will acquire the locks required for a rename - * operation. The code understands the semantics of renames and will - * validate that name1 exists under dp1 & that name2 may or may not - * exist under dp2. - * - * We are renaming dp1/name1 to dp2/name2. - * - * Return ENOENT if dp1 does not exist, other lookup errors, or 0 for success. + * Enter all inodes for a rename transaction into a sorted array. */ -STATIC int -xfs_lock_for_rename( +STATIC void +xfs_sort_for_rename( xfs_inode_t *dp1, /* in: old (source) directory inode */ xfs_inode_t *dp2, /* in: new (target) directory inode */ xfs_inode_t *ip1, /* in: inode of old entry */ - struct xfs_name *name2, /* in: new entry name */ - xfs_inode_t **ipp2, /* out: inode of new entry, if it + xfs_inode_t *ip2, /* in: inode of new entry, if it already exists, NULL otherwise. */ xfs_inode_t **i_tab,/* out: array of inode returned, sorted */ int *num_inodes) /* out: number of inodes in array */ { - xfs_inode_t *ip2 = NULL; xfs_inode_t *temp; - xfs_ino_t inum1, inum2; - int error; int i, j; - uint lock_mode; - int diff_dirs = (dp1 != dp2); - - /* - * First, find out the current inums of the entries so that we - * can determine the initial locking order. We'll have to - * sanity check stuff after all the locks have been acquired - * to see if we still have the right inodes, directories, etc. - */ - lock_mode = xfs_ilock_map_shared(dp1); - IHOLD(ip1); - xfs_itrace_ref(ip1); - - inum1 = ip1->i_ino; - - /* - * Unlock dp1 and lock dp2 if they are different. - */ - if (diff_dirs) { - xfs_iunlock_map_shared(dp1, lock_mode); - lock_mode = xfs_ilock_map_shared(dp2); - } - - error = xfs_dir_lookup_int(dp2, lock_mode, name2, &inum2, &ip2); - if (error == ENOENT) { /* target does not need to exist. */ - inum2 = 0; - } else if (error) { - /* - * If dp2 and dp1 are the same, the next line unlocks dp1. - * Got it? - */ - xfs_iunlock_map_shared(dp2, lock_mode); - IRELE (ip1); - return error; - } else { - xfs_itrace_ref(ip2); - } /* * i_tab contains a list of pointers to inodes. We initialize @@ -145,21 +92,20 @@ xfs_lock_for_rename( i_tab[0] = dp1; i_tab[1] = dp2; i_tab[2] = ip1; - if (inum2 == 0) { - *num_inodes = 3; - i_tab[3] = NULL; - } else { + if (ip2) { *num_inodes = 4; i_tab[3] = ip2; + } else { + *num_inodes = 3; + i_tab[3] = NULL; } - *ipp2 = i_tab[3]; /* * Sort the elements via bubble sort. (Remember, there are at * most 4 elements to sort, so this is adequate.) */ - for (i=0; i < *num_inodes; i++) { - for (j=1; j < *num_inodes; j++) { + for (i = 0; i < *num_inodes; i++) { + for (j = 1; j < *num_inodes; j++) { if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { temp = i_tab[j]; i_tab[j] = i_tab[j-1]; @@ -167,30 +113,6 @@ xfs_lock_for_rename( } } } - - /* - * We have dp2 locked. If it isn't first, unlock it. - * If it is first, tell xfs_lock_inodes so it can skip it - * when locking. if dp1 == dp2, xfs_lock_inodes will skip both - * since they are equal. xfs_lock_inodes needs all these inodes - * so that it can unlock and retry if there might be a dead-lock - * potential with the log. 
- */ - - if (i_tab[0] == dp2 && lock_mode == XFS_ILOCK_SHARED) { -#ifdef DEBUG - xfs_rename_skip++; -#endif - xfs_lock_inodes(i_tab, *num_inodes, 1, XFS_ILOCK_SHARED); - } else { -#ifdef DEBUG - xfs_rename_nskip++; -#endif - xfs_iunlock_map_shared(dp2, lock_mode); - xfs_lock_inodes(i_tab, *num_inodes, 0, XFS_ILOCK_SHARED); - } - - return 0; } /* @@ -202,10 +124,10 @@ xfs_rename( struct xfs_name *src_name, xfs_inode_t *src_ip, xfs_inode_t *target_dp, - struct xfs_name *target_name) + struct xfs_name *target_name, + xfs_inode_t *target_ip) { - xfs_trans_t *tp; - xfs_inode_t *target_ip; + xfs_trans_t *tp = NULL; xfs_mount_t *mp = src_dp->i_mount; int new_parent; /* moving to a new dir */ int src_is_directory; /* src_name is a directory */ @@ -215,9 +137,7 @@ xfs_rename( int cancel_flags; int committed; xfs_inode_t *inodes[4]; - int target_ip_dropped = 0; /* dropped target_ip link? */ int spaceres; - int target_link_zero = 0; int num_inodes; xfs_itrace_entry(src_dp); @@ -230,64 +150,27 @@ xfs_rename( target_dp, DM_RIGHT_NULL, src_name->name, target_name->name, 0, 0, 0); - if (error) { + if (error) return error; - } } /* Return through std_return after this point. */ - /* - * Lock all the participating inodes. Depending upon whether - * the target_name exists in the target directory, and - * whether the target directory is the same as the source - * directory, we can lock from 2 to 4 inodes. - * xfs_lock_for_rename() will return ENOENT if src_name - * does not exist in the source directory. - */ - tp = NULL; - error = xfs_lock_for_rename(src_dp, target_dp, src_ip, target_name, - &target_ip, inodes, &num_inodes); - if (error) { - /* - * We have nothing locked, no inode references, and - * no transaction, so just get out. - */ - goto std_return; - } - - ASSERT(src_ip != NULL); + new_parent = (src_dp != target_dp); + src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR); - if ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR) { + if (src_is_directory) { /* * Check for link count overflow on target_dp */ - if (target_ip == NULL && (src_dp != target_dp) && + if (target_ip == NULL && new_parent && target_dp->i_d.di_nlink >= XFS_MAXLINK) { error = XFS_ERROR(EMLINK); - xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); - goto rele_return; + goto std_return; } } - /* - * If we are using project inheritance, we only allow renames - * into our tree when the project IDs are the same; else the - * tree quota mechanism would be circumvented. - */ - if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && - (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) { - error = XFS_ERROR(EXDEV); - xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); - goto rele_return; - } - - new_parent = (src_dp != target_dp); - src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR); - - /* - * Drop the locks on our inodes so that we can start the transaction. - */ - xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); + xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, + inodes, &num_inodes); XFS_BMAP_INIT(&free_list, &first_block); tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME); @@ -302,7 +185,7 @@ xfs_rename( } if (error) { xfs_trans_cancel(tp, 0); - goto rele_return; + goto std_return; } /* @@ -310,13 +193,29 @@ xfs_rename( */ if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) { xfs_trans_cancel(tp, cancel_flags); - goto rele_return; + goto std_return; } /* - * Reacquire the inode locks we dropped above. + * Lock all the participating inodes. 
Depending upon whether + * the target_name exists in the target directory, and + * whether the target directory is the same as the source + * directory, we can lock from 2 to 4 inodes. + */ + xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); + + /* + * If we are using project inheritance, we only allow renames + * into our tree when the project IDs are the same; else the + * tree quota mechanism would be circumvented. */ - xfs_lock_inodes(inodes, num_inodes, 0, XFS_ILOCK_EXCL); + if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) && + (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) { + error = XFS_ERROR(EXDEV); + xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED); + xfs_trans_cancel(tp, cancel_flags); + goto std_return; + } /* * Join all the inodes to the transaction. From this point on, @@ -328,17 +227,17 @@ xfs_rename( */ IHOLD(src_dp); xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); + if (new_parent) { IHOLD(target_dp); xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); } - if ((src_ip != src_dp) && (src_ip != target_dp)) { - xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); - } - if ((target_ip != NULL) && - (target_ip != src_ip) && - (target_ip != src_dp) && - (target_ip != target_dp)) { + + IHOLD(src_ip); + xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); + + if (target_ip) { + IHOLD(target_ip); xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); } @@ -412,7 +311,6 @@ xfs_rename( error = xfs_droplink(tp, target_ip); if (error) goto abort_return; - target_ip_dropped = 1; if (src_is_directory) { /* @@ -422,10 +320,6 @@ xfs_rename( if (error) goto abort_return; } - - /* Do this test while we still hold the locks */ - target_link_zero = (target_ip)->i_d.di_nlink==0; - } /* target_ip != NULL */ /* @@ -492,15 +386,6 @@ xfs_rename( } /* - * If there was a target inode, take an extra reference on - * it here so that it doesn't go to xfs_inactive() from - * within the commit. - */ - if (target_ip != NULL) { - IHOLD(target_ip); - } - - /* * If this is a synchronous mount, make sure that the * rename transaction goes to disk before returning to * the user. @@ -509,30 +394,11 @@ xfs_rename( xfs_trans_set_sync(tp); } - /* - * Take refs. for vop_link_removed calls below. No need to worry - * about directory refs. because the caller holds them. - * - * Do holds before the xfs_bmap_finish since it might rele them down - * to zero. - */ - - if (target_ip_dropped) - IHOLD(target_ip); - IHOLD(src_ip); - error = xfs_bmap_finish(&tp, &free_list, &committed); if (error) { xfs_bmap_cancel(&free_list); xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); - if (target_ip != NULL) { - IRELE(target_ip); - } - if (target_ip_dropped) { - IRELE(target_ip); - } - IRELE(src_ip); goto std_return; } @@ -541,15 +407,6 @@ xfs_rename( * the vnode references. */ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); - if (target_ip != NULL) - IRELE(target_ip); - /* - * Let interposed file systems know about removed links. 
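In the rewritten join sequence above, each inode gets an IHOLD() reference immediately before its xfs_trans_ijoin(), replacing the old conditional holds that depended on which of the four inodes happened to coincide. The underlying rule, that whatever registers an object with a transaction takes its own reference to it, can be sketched in plain C (toy types only, not the kernel API):

#include <assert.h>

struct object {
	int refcount;
};

struct txn {
	struct object *joined[4];
	int njoined;
};

static void object_hold(struct object *o)
{
	o->refcount++;			/* IHOLD() analogue */
}

static void object_release(struct object *o)
{
	o->refcount--;			/* IRELE() analogue */
}

/* Every joined object gets its own reference, unconditionally. */
static void txn_join(struct txn *tp, struct object *o)
{
	assert(tp->njoined < 4);
	object_hold(o);
	tp->joined[tp->njoined++] = o;
}

/* The transaction drops the references it took when it is finished. */
static void txn_finish(struct txn *tp)
{
	for (int i = 0; i < tp->njoined; i++)
		object_release(tp->joined[i]);
	tp->njoined = 0;
}

Taking the reference unconditionally is slightly more work when two of the inodes are the same, but it removes the special cases and the matching conditional IRELE() calls on every error path.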
- */ - if (target_ip_dropped) - IRELE(target_ip); - - IRELE(src_ip); /* Fall through to std_return with error = 0 or errno from * xfs_trans_commit */ @@ -571,11 +428,4 @@ std_return: xfs_bmap_cancel(&free_list); xfs_trans_cancel(tp, cancel_flags); goto std_return; - - rele_return: - IRELE(src_ip); - if (target_ip != NULL) { - IRELE(target_ip); - } - goto std_return; } diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c index b8db1d5cde5a..4c70bf5e9985 100644 --- a/fs/xfs/xfs_trans_inode.c +++ b/fs/xfs/xfs_trans_inode.c @@ -111,13 +111,13 @@ xfs_trans_iget( */ ASSERT(ip->i_itemp != NULL); ASSERT(lock_flags & XFS_ILOCK_EXCL); - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) || - ismrlocked(&ip->i_iolock, MR_UPDATE)); + xfs_isilocked(ip, XFS_IOLOCK_EXCL)); ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) || (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL)); ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) || - ismrlocked(&ip->i_iolock, (MR_UPDATE | MR_ACCESS))); + xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)); ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) || (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY)); @@ -185,7 +185,7 @@ xfs_trans_ijoin( xfs_inode_log_item_t *iip; ASSERT(ip->i_transp == NULL); - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(lock_flags & XFS_ILOCK_EXCL); if (ip->i_itemp == NULL) xfs_inode_item_init(ip, ip->i_mount); @@ -232,7 +232,7 @@ xfs_trans_ihold( { ASSERT(ip->i_transp == tp); ASSERT(ip->i_itemp != NULL); - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ip->i_itemp->ili_flags |= XFS_ILI_HOLD; } @@ -257,7 +257,7 @@ xfs_trans_log_inode( ASSERT(ip->i_transp == tp); ASSERT(ip->i_itemp != NULL); - ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp)); ASSERT(lidp != NULL); diff --git a/fs/xfs/xfs_utils.c b/fs/xfs/xfs_utils.c index 2b8dc7e40772..98e5f110ba5f 100644 --- a/fs/xfs/xfs_utils.c +++ b/fs/xfs/xfs_utils.c @@ -41,49 +41,6 @@ #include "xfs_utils.h" -int -xfs_dir_lookup_int( - xfs_inode_t *dp, - uint lock_mode, - struct xfs_name *name, - xfs_ino_t *inum, - xfs_inode_t **ipp) -{ - int error; - - xfs_itrace_entry(dp); - - error = xfs_dir_lookup(NULL, dp, name, inum); - if (!error) { - /* - * Unlock the directory. We do this because we can't - * hold the directory lock while doing the vn_get() - * in xfs_iget(). Doing so could cause us to hold - * a lock while waiting for the inode to finish - * being inactive while it's waiting for a log - * reservation in the inactive routine. - */ - xfs_iunlock(dp, lock_mode); - error = xfs_iget(dp->i_mount, NULL, *inum, 0, 0, ipp, 0); - xfs_ilock(dp, lock_mode); - - if (error) { - *ipp = NULL; - } else if ((*ipp)->i_d.di_mode == 0) { - /* - * The inode has been freed. Something is - * wrong so just get out of here. - */ - xfs_iunlock(dp, lock_mode); - xfs_iput_new(*ipp, 0); - *ipp = NULL; - xfs_ilock(dp, lock_mode); - error = XFS_ERROR(ENOENT); - } - } - return error; -} - /* * Allocates a new inode from disk and return a pointer to the * incore copy. 
This routine will internally commit the current @@ -310,7 +267,7 @@ xfs_bump_ino_vers2( { xfs_mount_t *mp; - ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1); ip->i_d.di_version = XFS_DINODE_VERSION_2; diff --git a/fs/xfs/xfs_utils.h b/fs/xfs/xfs_utils.h index 175b126d2cab..f316cb85d8e2 100644 --- a/fs/xfs/xfs_utils.h +++ b/fs/xfs/xfs_utils.h @@ -21,8 +21,6 @@ #define IRELE(ip) VN_RELE(XFS_ITOV(ip)) #define IHOLD(ip) VN_HOLD(XFS_ITOV(ip)) -extern int xfs_dir_lookup_int(xfs_inode_t *, uint, struct xfs_name *, - xfs_ino_t *, xfs_inode_t **); extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *); extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t, xfs_dev_t, cred_t *, prid_t, int, diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c index fc48158fe479..30bacd8bb0e5 100644 --- a/fs/xfs/xfs_vfsops.c +++ b/fs/xfs/xfs_vfsops.c @@ -186,6 +186,7 @@ xfs_cleanup(void) kmem_zone_destroy(xfs_efi_zone); kmem_zone_destroy(xfs_ifork_zone); kmem_zone_destroy(xfs_ili_zone); + kmem_zone_destroy(xfs_log_ticket_zone); } /* diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 6650601c64f7..70702a60b4bb 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c @@ -76,132 +76,6 @@ xfs_open( } /* - * xfs_getattr - */ -int -xfs_getattr( - xfs_inode_t *ip, - bhv_vattr_t *vap, - int flags) -{ - bhv_vnode_t *vp = XFS_ITOV(ip); - xfs_mount_t *mp = ip->i_mount; - - xfs_itrace_entry(ip); - - if (XFS_FORCED_SHUTDOWN(mp)) - return XFS_ERROR(EIO); - - if (!(flags & ATTR_LAZY)) - xfs_ilock(ip, XFS_ILOCK_SHARED); - - vap->va_size = XFS_ISIZE(ip); - if (vap->va_mask == XFS_AT_SIZE) - goto all_done; - - vap->va_nblocks = - XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks); - vap->va_nodeid = ip->i_ino; -#if XFS_BIG_INUMS - vap->va_nodeid += mp->m_inoadd; -#endif - vap->va_nlink = ip->i_d.di_nlink; - - /* - * Quick exit for non-stat callers - */ - if ((vap->va_mask & - ~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID| - XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0) - goto all_done; - - /* - * Copy from in-core inode. - */ - vap->va_mode = ip->i_d.di_mode; - vap->va_uid = ip->i_d.di_uid; - vap->va_gid = ip->i_d.di_gid; - vap->va_projid = ip->i_d.di_projid; - - /* - * Check vnode type block/char vs. everything else. - */ - switch (ip->i_d.di_mode & S_IFMT) { - case S_IFBLK: - case S_IFCHR: - vap->va_rdev = ip->i_df.if_u2.if_rdev; - vap->va_blocksize = BLKDEV_IOSIZE; - break; - default: - vap->va_rdev = 0; - - if (!(XFS_IS_REALTIME_INODE(ip))) { - vap->va_blocksize = xfs_preferred_iosize(mp); - } else { - - /* - * If the file blocks are being allocated from a - * realtime partition, then return the inode's - * realtime extent size or the realtime volume's - * extent size. - */ - vap->va_blocksize = - xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog; - } - break; - } - - vn_atime_to_timespec(vp, &vap->va_atime); - vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec; - vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec; - vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec; - vap->va_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec; - - /* - * Exit for stat callers. See if any of the rest of the fields - * to be filled in are needed. - */ - if ((vap->va_mask & - (XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS| - XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0) - goto all_done; - - /* - * Convert di_flags to xflags. - */ - vap->va_xflags = xfs_ip2xflags(ip); - - /* - * Exit for inode revalidate. 
See if any of the rest of - * the fields to be filled in are needed. - */ - if ((vap->va_mask & - (XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS| - XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0) - goto all_done; - - vap->va_extsize = ip->i_d.di_extsize << mp->m_sb.sb_blocklog; - vap->va_nextents = - (ip->i_df.if_flags & XFS_IFEXTENTS) ? - ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) : - ip->i_d.di_nextents; - if (ip->i_afp) - vap->va_anextents = - (ip->i_afp->if_flags & XFS_IFEXTENTS) ? - ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) : - ip->i_d.di_anextents; - else - vap->va_anextents = 0; - vap->va_gen = ip->i_d.di_gen; - - all_done: - if (!(flags & ATTR_LAZY)) - xfs_iunlock(ip, XFS_ILOCK_SHARED); - return 0; -} - - -/* * xfs_setattr */ int @@ -211,7 +85,6 @@ xfs_setattr( int flags, cred_t *credp) { - bhv_vnode_t *vp = XFS_ITOV(ip); xfs_mount_t *mp = ip->i_mount; xfs_trans_t *tp; int mask; @@ -222,7 +95,6 @@ xfs_setattr( gid_t gid=0, igid=0; int timeflags = 0; xfs_prid_t projid=0, iprojid=0; - int mandlock_before, mandlock_after; struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2; int file_owner; int need_iolock = 1; @@ -383,7 +255,7 @@ xfs_setattr( m |= S_ISGID; #if 0 /* Linux allows this, Irix doesn't. */ - if ((vap->va_mode & S_ISVTX) && !VN_ISDIR(vp)) + if ((vap->va_mode & S_ISVTX) && !S_ISDIR(ip->i_d.di_mode)) m |= S_ISVTX; #endif if (m && !capable(CAP_FSETID)) @@ -461,10 +333,10 @@ xfs_setattr( goto error_return; } - if (VN_ISDIR(vp)) { + if (S_ISDIR(ip->i_d.di_mode)) { code = XFS_ERROR(EISDIR); goto error_return; - } else if (!VN_ISREG(vp)) { + } else if (!S_ISREG(ip->i_d.di_mode)) { code = XFS_ERROR(EINVAL); goto error_return; } @@ -626,9 +498,6 @@ xfs_setattr( xfs_trans_ihold(tp, ip); } - /* determine whether mandatory locking mode changes */ - mandlock_before = MANDLOCK(vp, ip->i_d.di_mode); - /* * Truncate file. Must have write permission and not be a directory. */ @@ -858,13 +727,6 @@ xfs_setattr( code = xfs_trans_commit(tp, commit_flags); } - /* - * If the (regular) file's mandatory locking mode changed, then - * notify the vnode. We do this under the inode lock to prevent - * racing calls to vop_vnode_change. - */ - mandlock_after = MANDLOCK(vp, ip->i_d.di_mode); - xfs_iunlock(ip, lock_flags); /* @@ -1443,7 +1305,7 @@ xfs_inactive_attrs( int error; xfs_mount_t *mp; - ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL)); tp = *tpp; mp = ip->i_mount; ASSERT(ip->i_d.di_forkoff != 0); @@ -1491,7 +1353,7 @@ xfs_release( xfs_mount_t *mp = ip->i_mount; int error; - if (!VN_ISREG(vp) || (ip->i_d.di_mode == 0)) + if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0)) return 0; /* If this is a read-only mount, don't do this (would generate I/O) */ @@ -1774,8 +1636,7 @@ xfs_lookup( struct xfs_name *name, xfs_inode_t **ipp) { - xfs_inode_t *ip; - xfs_ino_t e_inum; + xfs_ino_t inum; int error; uint lock_mode; @@ -1785,12 +1646,21 @@ xfs_lookup( return XFS_ERROR(EIO); lock_mode = xfs_ilock_map_shared(dp); - error = xfs_dir_lookup_int(dp, lock_mode, name, &e_inum, &ip); - if (!error) { - *ipp = ip; - xfs_itrace_ref(ip); - } + error = xfs_dir_lookup(NULL, dp, name, &inum); xfs_iunlock_map_shared(dp, lock_mode); + + if (error) + goto out; + + error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0); + if (error) + goto out; + + xfs_itrace_ref(*ipp); + return 0; + + out: + *ipp = NULL; return error; } @@ -1906,7 +1776,7 @@ xfs_create( * It is locked (and joined to the transaction). 
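The new xfs_lookup() above resolves the name to an inode number under the shared directory lock, drops that lock, and only then calls xfs_iget(), so the directory is never held while waiting on an inode that may be busy in xfs_inactive(). A stand-alone sketch of that lookup-then-instantiate shape (the toy directory table and object cache are invented for the example):

#include <errno.h>
#include <pthread.h>
#include <stddef.h>
#include <string.h>

struct object { unsigned long id; };

/* Invented stand-ins for the directory and the inode cache. */
static struct { const char *name; unsigned long id; } entries[] = {
	{ "a", 1 }, { "b", 2 },
};
static struct object cache[] = { { 1 }, { 2 } };

static pthread_rwlock_t dir_lock = PTHREAD_RWLOCK_INITIALIZER;

static int dir_lookup(const char *name, unsigned long *id)
{
	for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		if (strcmp(entries[i].name, name) == 0) {
			*id = entries[i].id;
			return 0;
		}
	}
	return ENOENT;
}

/* Stands in for xfs_iget(): may block, so it is called unlocked. */
static int object_get(unsigned long id, struct object **out)
{
	for (size_t i = 0; i < sizeof(cache) / sizeof(cache[0]); i++) {
		if (cache[i].id == id) {
			*out = &cache[i];
			return 0;
		}
	}
	return ENOENT;
}

static int lookup(const char *name, struct object **out)
{
	unsigned long id;
	int error;

	pthread_rwlock_rdlock(&dir_lock);
	error = dir_lookup(name, &id);		/* name -> number under the lock */
	pthread_rwlock_unlock(&dir_lock);	/* drop before the slow part */
	if (error)
		goto out;

	error = object_get(id, out);
	if (error)
		goto out;
	return 0;

out:
	*out = NULL;
	return error;
}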
*/ - ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE)); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); /* * Now we join the directory inode to the transaction. We do not do it @@ -2112,7 +1982,7 @@ again: ips[0] = ip; ips[1] = dp; - xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); + xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); } /* else e_inum == dp->i_ino */ /* This can happen if we're asked to lock /x/.. @@ -2160,7 +2030,6 @@ void xfs_lock_inodes( xfs_inode_t **ips, int inodes, - int first_locked, uint lock_mode) { int attempts = 0, i, j, try_lock; @@ -2168,13 +2037,8 @@ xfs_lock_inodes( ASSERT(ips && (inodes >= 2)); /* we need at least two */ - if (first_locked) { - try_lock = 1; - i = 1; - } else { - try_lock = 0; - i = 0; - } + try_lock = 0; + i = 0; again: for (; i < inodes; i++) { @@ -2298,29 +2162,14 @@ xfs_remove( return error; } - /* - * We need to get a reference to ip before we get our log - * reservation. The reason for this is that we cannot call - * xfs_iget for an inode for which we do not have a reference - * once we've acquired a log reservation. This is because the - * inode we are trying to get might be in xfs_inactive going - * for a log reservation. Since we'll have to wait for the - * inactive code to complete before returning from xfs_iget, - * we need to make sure that we don't have log space reserved - * when we call xfs_iget. Instead we get an unlocked reference - * to the inode before getting our log reservation. - */ - IHOLD(ip); - xfs_itrace_entry(ip); xfs_itrace_ref(ip); error = XFS_QM_DQATTACH(mp, dp, 0); - if (!error && dp != ip) + if (!error) error = XFS_QM_DQATTACH(mp, ip, 0); if (error) { REMOVE_DEBUG_TRACE(__LINE__); - IRELE(ip); goto std_return; } @@ -2347,7 +2196,6 @@ xfs_remove( ASSERT(error != ENOSPC); REMOVE_DEBUG_TRACE(__LINE__); xfs_trans_cancel(tp, 0); - IRELE(ip); return error; } @@ -2355,7 +2203,6 @@ xfs_remove( if (error) { REMOVE_DEBUG_TRACE(__LINE__); xfs_trans_cancel(tp, cancel_flags); - IRELE(ip); goto std_return; } @@ -2363,23 +2210,18 @@ xfs_remove( * At this point, we've gotten both the directory and the entry * inodes locked. */ + IHOLD(ip); xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); - if (dp != ip) { - /* - * Increment vnode ref count only in this case since - * there's an extra vnode reference in the case where - * dp == ip. - */ - IHOLD(dp); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - } + + IHOLD(dp); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); /* * Entry must exist since we did a lookup in xfs_lock_dir_and_entry. */ XFS_BMAP_INIT(&free_list, &first_block); error = xfs_dir_removename(tp, dp, name, ip->i_ino, - &first_block, &free_list, 0); + &first_block, &free_list, resblks); if (error) { ASSERT(error != ENOENT); REMOVE_DEBUG_TRACE(__LINE__); @@ -2402,12 +2244,6 @@ xfs_remove( link_zero = (ip)->i_d.di_nlink==0; /* - * Take an extra ref on the inode so that it doesn't - * go to xfs_inactive() from within the commit. - */ - IHOLD(ip); - - /* * If this is a synchronous mount, make sure that the * remove transaction goes to disk before returning to * the user. @@ -2423,10 +2259,8 @@ xfs_remove( } error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); - if (error) { - IRELE(ip); + if (error) goto std_return; - } /* * If we are using filestreams, kill the stream association. 
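The ismrlocked() assertions in this and the surrounding hunks become ASSERT(xfs_isilocked(ip, ...)), a single helper that answers whether the required inode lock is held in the required mode. A debug-only approximation of the idea for a plain rwlock might look like this (a purely illustrative sketch, not the kernel implementation):

#include <assert.h>
#include <pthread.h>

struct inode_like {
	pthread_rwlock_t lock;
#ifndef NDEBUG
	int write_locked;		/* debug-only owner tracking */
#endif
};

static void ilock_excl(struct inode_like *ip)
{
	pthread_rwlock_wrlock(&ip->lock);
#ifndef NDEBUG
	ip->write_locked = 1;
#endif
}

static void iunlock_excl(struct inode_like *ip)
{
#ifndef NDEBUG
	ip->write_locked = 0;
#endif
	pthread_rwlock_unlock(&ip->lock);
}

/* Like ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)): only meaningful in
 * debug builds, and only able to answer the exclusive-holder case. */
static void assert_ilocked_excl(struct inode_like *ip)
{
#ifndef NDEBUG
	assert(ip->write_locked);
#endif
	(void)ip;
}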
@@ -2438,7 +2272,6 @@ xfs_remove( xfs_filestream_deassociate(ip); xfs_itrace_exit(ip); - IRELE(ip); /* Fall through to std_return with error = 0 */ std_return: @@ -2467,8 +2300,6 @@ xfs_remove( cancel_flags |= XFS_TRANS_ABORT; xfs_trans_cancel(tp, cancel_flags); - IRELE(ip); - goto std_return; } @@ -2536,7 +2367,7 @@ xfs_link( ips[1] = sip; } - xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL); + xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL); /* * Increment vnode ref counts since xfs_trans_commit & @@ -2840,7 +2671,6 @@ xfs_rmdir( struct xfs_name *name, xfs_inode_t *cdp) { - bhv_vnode_t *dir_vp = XFS_ITOV(dp); xfs_mount_t *mp = dp->i_mount; xfs_trans_t *tp; int error; @@ -2866,27 +2696,12 @@ xfs_rmdir( } /* - * We need to get a reference to cdp before we get our log - * reservation. The reason for this is that we cannot call - * xfs_iget for an inode for which we do not have a reference - * once we've acquired a log reservation. This is because the - * inode we are trying to get might be in xfs_inactive going - * for a log reservation. Since we'll have to wait for the - * inactive code to complete before returning from xfs_iget, - * we need to make sure that we don't have log space reserved - * when we call xfs_iget. Instead we get an unlocked reference - * to the inode before getting our log reservation. - */ - IHOLD(cdp); - - /* * Get the dquots for the inodes. */ error = XFS_QM_DQATTACH(mp, dp, 0); - if (!error && dp != cdp) + if (!error) error = XFS_QM_DQATTACH(mp, cdp, 0); if (error) { - IRELE(cdp); REMOVE_DEBUG_TRACE(__LINE__); goto std_return; } @@ -2913,7 +2728,6 @@ xfs_rmdir( if (error) { ASSERT(error != ENOSPC); cancel_flags = 0; - IRELE(cdp); goto error_return; } XFS_BMAP_INIT(&free_list, &first_block); @@ -2927,21 +2741,13 @@ xfs_rmdir( error = xfs_lock_dir_and_entry(dp, cdp); if (error) { xfs_trans_cancel(tp, cancel_flags); - IRELE(cdp); goto std_return; } + IHOLD(dp); xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL); - if (dp != cdp) { - /* - * Only increment the parent directory vnode count if - * we didn't bump it in looking up cdp. The only time - * we don't bump it is when we're looking up ".". - */ - VN_HOLD(dir_vp); - } - xfs_itrace_ref(cdp); + IHOLD(cdp); xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL); ASSERT(cdp->i_d.di_nlink >= 2); @@ -2995,12 +2801,6 @@ xfs_rmdir( last_cdp_link = (cdp)->i_d.di_nlink==0; /* - * Take an extra ref on the child vnode so that it - * does not go to xfs_inactive() from within the commit. - */ - IHOLD(cdp); - - /* * If this is a synchronous mount, make sure that the * rmdir transaction goes to disk before returning to * the user. @@ -3014,19 +2814,15 @@ xfs_rmdir( xfs_bmap_cancel(&free_list); xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT)); - IRELE(cdp); goto std_return; } error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); if (error) { - IRELE(cdp); goto std_return; } - IRELE(cdp); - /* Fall through to std_return with error = 0 or the errno * from xfs_trans_commit. 
*/ std_return: diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h index 24c53923dc2c..8abe8f186e20 100644 --- a/fs/xfs/xfs_vnodeops.h +++ b/fs/xfs/xfs_vnodeops.h @@ -15,7 +15,6 @@ struct xfs_iomap; int xfs_open(struct xfs_inode *ip); -int xfs_getattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags); int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags, struct cred *credp); int xfs_readlink(struct xfs_inode *ip, char *link); @@ -48,9 +47,9 @@ int xfs_change_file_space(struct xfs_inode *ip, int cmd, struct cred *credp, int attr_flags); int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name, struct xfs_inode *src_ip, struct xfs_inode *target_dp, - struct xfs_name *target_name); + struct xfs_name *target_name, struct xfs_inode *target_ip); int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value, - int *valuelenp, int flags, cred_t *cred); + int *valuelenp, int flags); int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value, int valuelen, int flags); int xfs_attr_remove(struct xfs_inode *dp, const char *name, int flags); @@ -61,9 +60,6 @@ int xfs_ioctl(struct xfs_inode *ip, struct file *filp, ssize_t xfs_read(struct xfs_inode *ip, struct kiocb *iocb, const struct iovec *iovp, unsigned int segs, loff_t *offset, int ioflags); -ssize_t xfs_sendfile(struct xfs_inode *ip, struct file *filp, - loff_t *offset, int ioflags, size_t count, - read_actor_t actor, void *target); ssize_t xfs_splice_read(struct xfs_inode *ip, struct file *infilp, loff_t *ppos, struct pipe_inode_info *pipe, size_t count, int flags, int ioflags); |