Diffstat (limited to 'fs')
-rw-r--r--  fs/erofs/utils.c            8
-rw-r--r--  fs/io_uring.c              73
-rw-r--r--  fs/iomap/buffered-io.c     26
-rw-r--r--  fs/ksmbd/smb2pdu.c         30
-rw-r--r--  fs/netfs/read_helper.c      4
-rw-r--r--  fs/nfs/inode.c              1
-rw-r--r--  fs/nfs/nfs42proc.c          4
-rw-r--r--  fs/nfs/nfs42xdr.c           3
-rw-r--r--  fs/nfs/nfs4state.c          4
-rw-r--r--  fs/nfs/nfstrace.h           1
-rw-r--r--  fs/ntfs/Kconfig             1
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c   17
-rw-r--r--  fs/xfs/xfs_icache.c        21
-rw-r--r--  fs/xfs/xfs_inode.h          4
14 files changed, 110 insertions, 87 deletions
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
index 84da2c280012..ec9a1d780dc1 100644
--- a/fs/erofs/utils.c
+++ b/fs/erofs/utils.c
@@ -150,7 +150,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
* however in order to avoid some race conditions, add a
* DBG_BUGON to observe this in advance.
*/
- DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp);
+ DBG_BUGON(__xa_erase(&sbi->managed_pslots, grp->index) != grp);
/* last refcount should be connected with its managed pslot. */
erofs_workgroup_unfreeze(grp, 0);
@@ -165,15 +165,19 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
unsigned int freed = 0;
unsigned long index;
+ xa_lock(&sbi->managed_pslots);
xa_for_each(&sbi->managed_pslots, index, grp) {
/* try to shrink each valid workgroup */
if (!erofs_try_to_release_workgroup(sbi, grp))
continue;
+ xa_unlock(&sbi->managed_pslots);
++freed;
if (!--nr_shrink)
- break;
+ return freed;
+ xa_lock(&sbi->managed_pslots);
}
+ xa_unlock(&sbi->managed_pslots);
return freed;
}
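
The hunks above make the workstation shrinker hold xa_lock() across the whole walk, which is why the erase inside erofs_try_to_release_workgroup() becomes __xa_erase(), the variant that requires the lock to already be held. A minimal sketch of that drop-and-relock walk, assuming a hypothetical entry_is_idle() refcount test in place of the erofs-specific freeze logic:

	#include <linux/slab.h>
	#include <linux/xarray.h>

	static bool entry_is_idle(void *entry);	/* hypothetical refcount test */

	static unsigned long shrink_all(struct xarray *xa)
	{
		unsigned long index, freed = 0;
		void *entry;

		xa_lock(xa);
		xa_for_each(xa, index, entry) {
			if (!entry_is_idle(entry))
				continue;
			__xa_erase(xa, index);	/* lock-held variant */
			xa_unlock(xa);		/* drop around the free */
			kfree(entry);
			freed++;
			xa_lock(xa);		/* retake before the next lookup */
		}
		xa_unlock(xa);
		return freed;
	}

Cycling the lock mid-walk is legal because xa_for_each() restarts from the saved index on every iteration rather than caching a position, so the array may change while the lock is dropped.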
diff --git a/fs/io_uring.c b/fs/io_uring.c
index a4c508a1e0cf..c4f217613f56 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1278,6 +1278,7 @@ static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
bool cancel_all)
+ __must_hold(&req->ctx->timeout_lock)
{
struct io_kiocb *req;
@@ -1293,6 +1294,44 @@ static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
return false;
}
+static bool io_match_linked(struct io_kiocb *head)
+{
+ struct io_kiocb *req;
+
+ io_for_each_link(req, head) {
+ if (req->flags & REQ_F_INFLIGHT)
+ return true;
+ }
+ return false;
+}
+
+/*
+ * As io_match_task() but protected against racing with linked timeouts.
+ * User must not hold timeout_lock.
+ */
+static bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
+ bool cancel_all)
+{
+ bool matched;
+
+ if (task && head->task != task)
+ return false;
+ if (cancel_all)
+ return true;
+
+ if (head->flags & REQ_F_LINK_TIMEOUT) {
+ struct io_ring_ctx *ctx = head->ctx;
+
+ /* protect against races with linked timeouts */
+ spin_lock_irq(&ctx->timeout_lock);
+ matched = io_match_linked(head);
+ spin_unlock_irq(&ctx->timeout_lock);
+ } else {
+ matched = io_match_linked(head);
+ }
+ return matched;
+}
+
static inline bool req_has_async_data(struct io_kiocb *req)
{
return req->flags & REQ_F_ASYNC_DATA;
@@ -4327,6 +4366,7 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
kfree(nxt);
if (++i == nbufs)
return i;
+ cond_resched();
}
i++;
kfree(buf);
@@ -5699,17 +5739,15 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
int posted = 0, i;
spin_lock(&ctx->completion_lock);
- spin_lock_irq(&ctx->timeout_lock);
for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
struct hlist_head *list;
list = &ctx->cancel_hash[i];
hlist_for_each_entry_safe(req, tmp, list, hash_node) {
- if (io_match_task(req, tsk, cancel_all))
+ if (io_match_task_safe(req, tsk, cancel_all))
posted += io_poll_remove_one(req);
}
}
- spin_unlock_irq(&ctx->timeout_lock);
spin_unlock(&ctx->completion_lock);
if (posted)
@@ -6158,6 +6196,9 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
return -EFAULT;
+ if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
+ return -EINVAL;
+
data->mode = io_translate_timeout_mode(flags);
hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);
@@ -6882,10 +6923,11 @@ static inline struct file *io_file_get(struct io_ring_ctx *ctx,
static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
{
struct io_kiocb *prev = req->timeout.prev;
- int ret;
+ int ret = -ENOENT;
if (prev) {
- ret = io_try_cancel_userdata(req, prev->user_data);
+ if (!(req->task->flags & PF_EXITING))
+ ret = io_try_cancel_userdata(req, prev->user_data);
io_req_complete_post(req, ret ?: -ETIME, 0);
io_put_req(prev);
} else {
@@ -9257,10 +9299,8 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx)
struct io_buffer *buf;
unsigned long index;
- xa_for_each(&ctx->io_buffers, index, buf) {
+ xa_for_each(&ctx->io_buffers, index, buf)
__io_remove_buffers(ctx, buf, index, -1U);
- cond_resched();
- }
}
static void io_req_caches_free(struct io_ring_ctx *ctx)
@@ -9564,19 +9604,8 @@ static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
{
struct io_kiocb *req = container_of(work, struct io_kiocb, work);
struct io_task_cancel *cancel = data;
- bool ret;
-
- if (!cancel->all && (req->flags & REQ_F_LINK_TIMEOUT)) {
- struct io_ring_ctx *ctx = req->ctx;
- /* protect against races with linked timeouts */
- spin_lock_irq(&ctx->timeout_lock);
- ret = io_match_task(req, cancel->task, cancel->all);
- spin_unlock_irq(&ctx->timeout_lock);
- } else {
- ret = io_match_task(req, cancel->task, cancel->all);
- }
- return ret;
+ return io_match_task_safe(req, cancel->task, cancel->all);
}
static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
@@ -9587,14 +9616,12 @@ static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
LIST_HEAD(list);
spin_lock(&ctx->completion_lock);
- spin_lock_irq(&ctx->timeout_lock);
list_for_each_entry_reverse(de, &ctx->defer_list, list) {
- if (io_match_task(de->req, task, cancel_all)) {
+ if (io_match_task_safe(de->req, task, cancel_all)) {
list_cut_position(&list, &ctx->defer_list, &de->list);
break;
}
}
- spin_unlock_irq(&ctx->timeout_lock);
spin_unlock(&ctx->completion_lock);
if (list_empty(&list))
return false;
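
The central io_uring change is io_match_task_safe(): instead of io_poll_remove_all() and io_cancel_defer_files() holding timeout_lock across entire hash-bucket and defer-list walks, the lock is now taken inside the match helper, per request, and only when the request carries a linked timeout that can actually race with the link traversal. The shape of that lock-scope narrowing, as fragments with a hypothetical iterator and cancel helper:

	/* before: every caller held the irq-safe lock around the whole walk */
	spin_lock_irq(&ctx->timeout_lock);
	for_each_request(ctx, req)			/* hypothetical iterator */
		if (io_match_task(req, task, cancel_all))
			cancel_one(req);		/* hypothetical */
	spin_unlock_irq(&ctx->timeout_lock);

	/* after: the helper locks internally, per request, and only if the
	 * request has a linked timeout that can race with the traversal */
	for_each_request(ctx, req)
		if (io_match_task_safe(req, task, cancel_all))
			cancel_one(req);

The same series also moves the cond_resched() from io_destroy_buffers() into the per-buffer loop of __io_remove_buffers(), so any caller freeing an unbounded buffer list now yields once per buffer rather than once per list.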
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 1753c26c8e76..71a36ae120ee 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -205,7 +205,16 @@ struct iomap_readpage_ctx {
struct readahead_control *rac;
};
-static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
+/**
+ * iomap_read_inline_data - copy inline data into the page cache
+ * @iter: iteration structure
+ * @page: page to copy to
+ *
+ * Copy the inline data in @iter into @page and zero out the rest of the page.
+ * Only a single IOMAP_INLINE extent is allowed at the end of each file.
+ * Returns zero for success to complete the read, or the usual negative errno.
+ */
+static int iomap_read_inline_data(const struct iomap_iter *iter,
struct page *page)
{
const struct iomap *iomap = iomap_iter_srcmap(iter);
@@ -214,7 +223,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
void *addr;
if (PageUptodate(page))
- return PAGE_SIZE - poff;
+ return 0;
if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
return -EIO;
@@ -231,7 +240,7 @@ static loff_t iomap_read_inline_data(const struct iomap_iter *iter,
memset(addr + size, 0, PAGE_SIZE - poff - size);
kunmap_local(addr);
iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
- return PAGE_SIZE - poff;
+ return 0;
}
static inline bool iomap_block_needs_zeroing(const struct iomap_iter *iter,
@@ -257,7 +266,7 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
sector_t sector;
if (iomap->type == IOMAP_INLINE)
- return min(iomap_read_inline_data(iter, page), length);
+ return iomap_read_inline_data(iter, page);
/* zero post-eof blocks as the page may be mapped */
iop = iomap_page_create(iter->inode, page);
@@ -370,6 +379,8 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
ctx->cur_page_in_bio = false;
}
ret = iomap_readpage_iter(iter, ctx, done);
+ if (ret <= 0)
+ return ret;
}
return done;
@@ -580,15 +591,10 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
static int iomap_write_begin_inline(const struct iomap_iter *iter,
struct page *page)
{
- int ret;
-
/* needs more work for the tailpacking case; disable for now */
if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
return -EIO;
- ret = iomap_read_inline_data(iter, page);
- if (ret < 0)
- return ret;
- return 0;
+ return iomap_read_inline_data(iter, page);
}
static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
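
The iomap hunks change iomap_read_inline_data() from returning a byte count (loff_t) to the usual 0-or-negative-errno int, which is why both call sites collapse:

	/* old: byte-count convention, clamped at the call site */
	return min(iomap_read_inline_data(iter, page), length);

	/* new: composes directly; no clamping, no separate ret < 0 check */
	return iomap_read_inline_data(iter, page);

The matching guard added to iomap_readahead_iter() — if (ret <= 0) return ret; — stops the readahead loop once an inline extent (or an error) is reached; since an inline extent is only allowed at the end of the file, ending the iteration there is correct.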
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 121f8e8c70ac..49c9da37315c 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -1697,8 +1697,10 @@ int smb2_sess_setup(struct ksmbd_work *work)
negblob_off = le16_to_cpu(req->SecurityBufferOffset);
negblob_len = le16_to_cpu(req->SecurityBufferLength);
if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
- negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
- return -EINVAL;
+ negblob_len < offsetof(struct negotiate_message, NegotiateFlags)) {
+ rc = -EINVAL;
+ goto out_err;
+ }
negblob = (struct negotiate_message *)((char *)&req->hdr.ProtocolId +
negblob_off);
@@ -4457,6 +4459,12 @@ static void get_file_stream_info(struct ksmbd_work *work,
&stat);
file_info = (struct smb2_file_stream_info *)rsp->Buffer;
+ buf_free_len =
+ smb2_calc_max_out_buf_len(work, 8,
+ le32_to_cpu(req->OutputBufferLength));
+ if (buf_free_len < 0)
+ goto out;
+
xattr_list_len = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
if (xattr_list_len < 0) {
goto out;
@@ -4465,12 +4473,6 @@ static void get_file_stream_info(struct ksmbd_work *work,
goto out;
}
- buf_free_len =
- smb2_calc_max_out_buf_len(work, 8,
- le32_to_cpu(req->OutputBufferLength));
- if (buf_free_len < 0)
- goto out;
-
while (idx < xattr_list_len) {
stream_name = xattr_list + idx;
streamlen = strlen(stream_name);
@@ -4496,8 +4498,10 @@ static void get_file_stream_info(struct ksmbd_work *work,
":%s", &stream_name[XATTR_NAME_STREAM_LEN]);
next = sizeof(struct smb2_file_stream_info) + streamlen * 2;
- if (next > buf_free_len)
+ if (next > buf_free_len) {
+ kfree(stream_buf);
break;
+ }
file_info = (struct smb2_file_stream_info *)&rsp->Buffer[nbytes];
streamlen = smbConvertToUTF16((__le16 *)file_info->StreamName,
@@ -4514,6 +4518,7 @@ static void get_file_stream_info(struct ksmbd_work *work,
file_info->NextEntryOffset = cpu_to_le32(next);
}
+out:
if (!S_ISDIR(stat.mode) &&
buf_free_len >= sizeof(struct smb2_file_stream_info) + 7 * 2) {
file_info = (struct smb2_file_stream_info *)
@@ -4522,14 +4527,13 @@ static void get_file_stream_info(struct ksmbd_work *work,
"::$DATA", 7, conn->local_nls, 0);
streamlen *= 2;
file_info->StreamNameLength = cpu_to_le32(streamlen);
- file_info->StreamSize = 0;
- file_info->StreamAllocationSize = 0;
+ file_info->StreamSize = cpu_to_le64(stat.size);
+ file_info->StreamAllocationSize = cpu_to_le64(stat.blocks << 9);
nbytes += sizeof(struct smb2_file_stream_info) + streamlen;
}
/* last entry offset should be 0 */
file_info->NextEntryOffset = 0;
-out:
kvfree(xattr_list);
rsp->OutputBufferLength = cpu_to_le32(nbytes);
@@ -5068,7 +5072,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
if (addition_info & ~(OWNER_SECINFO | GROUP_SECINFO | DACL_SECINFO |
PROTECTED_DACL_SECINFO |
UNPROTECTED_DACL_SECINFO)) {
- pr_err("Unsupported addition info: 0x%x)\n",
+ ksmbd_debug(SMB, "Unsupported addition info: 0x%x)\n",
addition_info);
pntsd->revision = cpu_to_le16(1);
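
Two of the ksmbd fixes share one shape: establish the output-buffer budget before doing work that must be undone, and route every failure through a label that still runs the mandatory cleanup (the early return in smb2_sess_setup() previously skipped teardown, and the overflow break in get_file_stream_info() previously leaked stream_buf). A compressed sketch of that shape, with hypothetical types and helpers:

	#include <linux/slab.h>

	struct reply_ctx;					/* hypothetical */
	static int calc_out_budget(struct reply_ctx *c);	/* hypothetical */
	static int fill_entries(struct reply_ctx *c, char *buf, int len);

	static int build_reply(struct reply_ctx *c)
	{
		char *scratch = NULL;
		int rc, budget;

		budget = calc_out_budget(c);	/* may return -errno */
		if (budget < 0) {
			rc = budget;
			goto out;
		}
		scratch = kmalloc(budget, GFP_KERNEL);
		if (!scratch) {
			rc = -ENOMEM;
			goto out;
		}
		rc = fill_entries(c, scratch, budget);
	out:
		kfree(scratch);		/* kfree(NULL) is a no-op */
		return rc;
	}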
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 9320a42dfaf9..7046f9bdd8dc 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -1008,8 +1008,8 @@ out:
}
EXPORT_SYMBOL(netfs_readpage);
-/**
- * netfs_skip_folio_read - prep a folio for writing without reading first
+/*
+ * Prepare a folio for writing without reading first
* @folio: The folio being prepared
* @pos: starting position for the write
* @len: length of write
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index dd53704c3f40..fda530d5e764 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -219,6 +219,7 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
NFS_INO_DATA_INVAL_DEFER);
else if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
+ trace_nfs_set_cache_invalid(inode, 0);
}
EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 08355b66e7cb..8b21ff1be717 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -289,7 +289,9 @@ static void nfs42_copy_dest_done(struct inode *inode, loff_t pos, loff_t len)
loff_t newsize = pos + len;
loff_t end = newsize - 1;
- truncate_pagecache_range(inode, pos, end);
+ WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
+ pos >> PAGE_SHIFT, end >> PAGE_SHIFT));
+
spin_lock(&inode->i_lock);
if (newsize > i_size_read(inode))
i_size_write(inode, newsize);
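
The nfs42_copy_dest_done() hunk replaces truncate_pagecache_range(), which removes the cached pages outright, with invalidate_inode_pages2_range(), which works on page indexes and returns -EBUSY if any page could not be cleanly invalidated — hence the WARN_ON_ONCE around the call. The byte-to-index conversion, as a sketch:

	#include <linux/fs.h>
	#include <linux/mm.h>

	static void drop_dest_range(struct inode *inode, loff_t pos, loff_t len)
	{
		pgoff_t first = pos >> PAGE_SHIFT;
		pgoff_t last = (pos + len - 1) >> PAGE_SHIFT;

		/* 0 on success, -EBUSY if a page resisted invalidation */
		WARN_ON_ONCE(invalidate_inode_pages2_range(inode->i_mapping,
							   first, last));
	}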
diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
index c8bad735e4c1..271e5f92ed01 100644
--- a/fs/nfs/nfs42xdr.c
+++ b/fs/nfs/nfs42xdr.c
@@ -1434,8 +1434,7 @@ static int nfs4_xdr_dec_clone(struct rpc_rqst *rqstp,
status = decode_clone(xdr);
if (status)
goto out;
- status = decode_getfattr(xdr, res->dst_fattr, res->server);
-
+ decode_getfattr(xdr, res->dst_fattr, res->server);
out:
res->rpc_status = status;
return status;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index ecc4594299d6..f63dfa01001c 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1998,6 +1998,10 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
dprintk("%s: exit with error %d for server %s\n",
__func__, -EPROTONOSUPPORT, clp->cl_hostname);
return -EPROTONOSUPPORT;
+ case -ENOSPC:
+ if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
+ nfs_mark_client_ready(clp, -EIO);
+ return -EIO;
case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
* in nfs4_exchange_id */
default:
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 21dac847f1e4..b3aee261801e 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -162,6 +162,7 @@ DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit);
DEFINE_NFS_INODE_EVENT(nfs_fsync_enter);
DEFINE_NFS_INODE_EVENT_DONE(nfs_fsync_exit);
DEFINE_NFS_INODE_EVENT(nfs_access_enter);
+DEFINE_NFS_INODE_EVENT_DONE(nfs_set_cache_invalid);
TRACE_EVENT(nfs_access_exit,
TP_PROTO(
diff --git a/fs/ntfs/Kconfig b/fs/ntfs/Kconfig
index 1667a7e590d8..f93e69a61283 100644
--- a/fs/ntfs/Kconfig
+++ b/fs/ntfs/Kconfig
@@ -52,6 +52,7 @@ config NTFS_DEBUG
config NTFS_RW
bool "NTFS write support"
depends on NTFS_FS
+ depends on PAGE_SIZE_LESS_THAN_64KB
help
This enables the partial, but safe, write support in the NTFS driver.
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index fbc9d816882c..23523b802539 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -1077,21 +1077,18 @@ xfs_attr_node_hasname(
state = xfs_da_state_alloc(args);
if (statep != NULL)
- *statep = NULL;
+ *statep = state;
/*
* Search to see if name exists, and get back a pointer to it.
*/
error = xfs_da3_node_lookup_int(state, &retval);
- if (error) {
- xfs_da_state_free(state);
- return error;
- }
+ if (error)
+ retval = error;
- if (statep != NULL)
- *statep = state;
- else
+ if (!statep)
xfs_da_state_free(state);
+
return retval;
}
@@ -1112,7 +1109,7 @@ xfs_attr_node_addname_find_attr(
*/
retval = xfs_attr_node_hasname(args, &dac->da_state);
if (retval != -ENOATTR && retval != -EEXIST)
- return retval;
+ goto error;
if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE))
goto error;
@@ -1337,7 +1334,7 @@ int xfs_attr_node_removename_setup(
error = xfs_attr_node_hasname(args, state);
if (error != -EEXIST)
- return error;
+ goto out;
error = 0;
ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL);
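
Both callers change because xfs_attr_node_hasname() now hands the allocated da_state back through @statep unconditionally, even when the lookup fails; freeing it becomes the caller's job on every path, where previously the helper freed it only on some errors. The caller-side contract, sketched for a hypothetical caller:

	struct xfs_da_state	*state = NULL;
	int			error;

	error = xfs_attr_node_hasname(args, &state);
	if (error != -EEXIST && error != -ENOATTR)
		goto out;	/* lookup failed, but the state is still ours */
	/* ... use state->path ... */
	error = 0;
out:
	if (state)
		xfs_da_state_free(state);
	return error;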
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index e1472004170e..da4af2142a2b 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -289,22 +289,6 @@ xfs_perag_clear_inode_tag(
trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}
-static inline void
-xfs_inew_wait(
- struct xfs_inode *ip)
-{
- wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
- DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
-
- do {
- prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
- if (!xfs_iflags_test(ip, XFS_INEW))
- break;
- schedule();
- } while (true);
- finish_wait(wq, &wait.wq_entry);
-}
-
/*
* When we recycle a reclaimable inode, we need to re-initialise the VFS inode
* part of the structure. This is made more complex by the fact we store
@@ -368,18 +352,13 @@ xfs_iget_recycle(
ASSERT(!rwsem_is_locked(&inode->i_rwsem));
error = xfs_reinit_inode(mp, inode);
if (error) {
- bool wake;
-
/*
* Re-initializing the inode failed, and we are in deep
* trouble. Try to re-add it to the reclaim list.
*/
rcu_read_lock();
spin_lock(&ip->i_flags_lock);
- wake = !!__xfs_iflags_test(ip, XFS_INEW);
ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
- if (wake)
- wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
spin_unlock(&ip->i_flags_lock);
rcu_read_unlock();
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index e635a3d64cba..c447bf04205a 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -231,8 +231,7 @@ static inline bool xfs_inode_has_bigtime(struct xfs_inode *ip)
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
#define XFS_ISTALE (1 << 1) /* inode has been staled */
#define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */
-#define __XFS_INEW_BIT 3 /* inode has just been allocated */
-#define XFS_INEW (1 << __XFS_INEW_BIT)
+#define XFS_INEW (1 << 3) /* inode has just been allocated */
#define XFS_IPRESERVE_DM_FIELDS (1 << 4) /* has legacy DMAPI fields set */
#define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */
#define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */
@@ -492,7 +491,6 @@ static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
xfs_iflags_clear(ip, XFS_INEW);
barrier();
unlock_new_inode(VFS_I(ip));
- wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
}
static inline void xfs_setup_existing_inode(struct xfs_inode *ip)