Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c | 148
-rw-r--r--  fs/btrfs/extent_io.c | 5
-rw-r--r--  fs/btrfs/inode.c | 7
-rw-r--r--  fs/btrfs/ioctl.c | 10
-rw-r--r--  fs/btrfs/qgroup.c | 17
-rw-r--r--  fs/ceph/inode.c | 1
-rw-r--r--  fs/cifs/cifs_debug.c | 29
-rw-r--r--  fs/cifs/cifsencrypt.c | 20
-rw-r--r--  fs/cifs/cifsglob.h | 40
-rw-r--r--  fs/cifs/cifsproto.h | 7
-rw-r--r--  fs/cifs/cifssmb.c | 8
-rw-r--r--  fs/cifs/connect.c | 8
-rw-r--r--  fs/cifs/inode.c | 13
-rw-r--r--  fs/cifs/misc.c | 9
-rw-r--r--  fs/cifs/smb2misc.c | 19
-rw-r--r--  fs/cifs/smb2ops.c | 277
-rw-r--r--  fs/cifs/smb2pdu.c | 333
-rw-r--r--  fs/cifs/smb2pdu.h | 23
-rw-r--r--  fs/cifs/smb2proto.h | 6
-rw-r--r--  fs/cifs/smb2transport.c | 18
-rw-r--r--  fs/cifs/smbdirect.c | 19
-rw-r--r--  fs/cifs/trace.h | 3
-rw-r--r--  fs/cifs/transport.c | 173
-rw-r--r--  fs/eventfd.c | 19
-rw-r--r--  fs/eventpoll.c | 15
-rw-r--r--  fs/ext2/ext2.h | 2
-rw-r--r--  fs/ext2/super.c | 6
-rw-r--r--  fs/jfs/xattr.c | 10
-rw-r--r--  fs/nfs/delegation.c | 4
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 21
-rw-r--r--  fs/nfs/nfs4proc.c | 33
-rw-r--r--  fs/nfs/pnfs.h | 5
-rw-r--r--  fs/pipe.c | 22
-rw-r--r--  fs/proc/base.c | 28
-rw-r--r--  fs/proc/generic.c | 11
-rw-r--r--  fs/quota/dquot.c | 7
-rw-r--r--  fs/select.c | 23
-rw-r--r--  fs/timerfd.c | 22
-rw-r--r--  fs/udf/balloc.c | 5
-rw-r--r--  fs/udf/directory.c | 8
-rw-r--r--  fs/udf/inode.c | 8
-rw-r--r--  fs/udf/namei.c | 14
-rw-r--r--  fs/udf/udfdecl.h | 9
-rw-r--r--  fs/xfs/libxfs/xfs_ag_resv.c | 31
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 26
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h | 2
-rw-r--r--  fs/xfs/libxfs/xfs_format.h | 5
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c | 76
-rw-r--r--  fs/xfs/libxfs/xfs_rtbitmap.c | 4
-rw-r--r--  fs/xfs/xfs_bmap_util.c | 106
-rw-r--r--  fs/xfs/xfs_fsmap.c | 4
-rw-r--r--  fs/xfs/xfs_fsops.c | 2
-rw-r--r--  fs/xfs/xfs_inode.c | 57
-rw-r--r--  fs/xfs/xfs_iomap.c | 15
-rw-r--r--  fs/xfs/xfs_trans.c | 7
55 files changed, 1116 insertions, 684 deletions
diff --git a/fs/aio.c b/fs/aio.c
index e1d20124ec0e..210df9da1283 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -5,7 +5,6 @@
* Implements an efficient asynchronous io interface.
*
* Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
- * Copyright 2018 Christoph Hellwig.
*
* See ../COPYING for licensing terms.
*/
@@ -165,22 +164,10 @@ struct fsync_iocb {
bool datasync;
};
-struct poll_iocb {
- struct file *file;
- __poll_t events;
- struct wait_queue_head *head;
-
- union {
- struct wait_queue_entry wait;
- struct work_struct work;
- };
-};
-
struct aio_kiocb {
union {
struct kiocb rw;
struct fsync_iocb fsync;
- struct poll_iocb poll;
};
struct kioctx *ki_ctx;
@@ -1590,6 +1577,7 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
iocb->aio_rw_flags))
return -EINVAL;
+
req->file = fget(iocb->aio_fildes);
if (unlikely(!req->file))
return -EBADF;
@@ -1604,137 +1592,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
return 0;
}
-/* need to use list_del_init so we can check if item was present */
-static inline bool __aio_poll_remove(struct poll_iocb *req)
-{
- if (list_empty(&req->wait.entry))
- return false;
- list_del_init(&req->wait.entry);
- return true;
-}
-
-static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
- fput(iocb->poll.file);
- aio_complete(iocb, mangle_poll(mask), 0);
-}
-
-static void aio_poll_work(struct work_struct *work)
-{
- struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);
-
- if (!list_empty_careful(&iocb->ki_list))
- aio_remove_iocb(iocb);
- __aio_poll_complete(iocb, iocb->poll.events);
-}
-
-static int aio_poll_cancel(struct kiocb *iocb)
-{
- struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
- struct poll_iocb *req = &aiocb->poll;
- struct wait_queue_head *head = req->head;
- bool found = false;
-
- spin_lock(&head->lock);
- found = __aio_poll_remove(req);
- spin_unlock(&head->lock);
-
- if (found) {
- req->events = 0;
- INIT_WORK(&req->work, aio_poll_work);
- schedule_work(&req->work);
- }
- return 0;
-}
-
-static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
- void *key)
-{
- struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
- struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
- struct file *file = req->file;
- __poll_t mask = key_to_poll(key);
-
- assert_spin_locked(&req->head->lock);
-
- /* for instances that support it check for an event match first: */
- if (mask && !(mask & req->events))
- return 0;
-
- mask = file->f_op->poll_mask(file, req->events) & req->events;
- if (!mask)
- return 0;
-
- __aio_poll_remove(req);
-
- /*
- * Try completing without a context switch if we can acquire ctx_lock
- * without spinning. Otherwise we need to defer to a workqueue to
- * avoid a deadlock due to the lock order.
- */
- if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
- list_del_init(&iocb->ki_list);
- spin_unlock(&iocb->ki_ctx->ctx_lock);
-
- __aio_poll_complete(iocb, mask);
- } else {
- req->events = mask;
- INIT_WORK(&req->work, aio_poll_work);
- schedule_work(&req->work);
- }
-
- return 1;
-}
-
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
-{
- struct kioctx *ctx = aiocb->ki_ctx;
- struct poll_iocb *req = &aiocb->poll;
- __poll_t mask;
-
- /* reject any unknown events outside the normal event mask. */
- if ((u16)iocb->aio_buf != iocb->aio_buf)
- return -EINVAL;
- /* reject fields that are not defined for poll */
- if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
- return -EINVAL;
-
- req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
- req->file = fget(iocb->aio_fildes);
- if (unlikely(!req->file))
- return -EBADF;
- if (!file_has_poll_mask(req->file))
- goto out_fail;
-
- req->head = req->file->f_op->get_poll_head(req->file, req->events);
- if (!req->head)
- goto out_fail;
- if (IS_ERR(req->head)) {
- mask = EPOLLERR;
- goto done;
- }
-
- init_waitqueue_func_entry(&req->wait, aio_poll_wake);
- aiocb->ki_cancel = aio_poll_cancel;
-
- spin_lock_irq(&ctx->ctx_lock);
- spin_lock(&req->head->lock);
- mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
- if (!mask) {
- __add_wait_queue(req->head, &req->wait);
- list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
- }
- spin_unlock(&req->head->lock);
- spin_unlock_irq(&ctx->ctx_lock);
-done:
- if (mask)
- __aio_poll_complete(aiocb, mask);
- return 0;
-out_fail:
- fput(req->file);
- return -EINVAL; /* same as no support for IOCB_CMD_POLL */
-}
-
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
bool compat)
{
@@ -1808,9 +1665,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
case IOCB_CMD_FDSYNC:
ret = aio_fsync(&req->fsync, &iocb, true);
break;
- case IOCB_CMD_POLL:
- ret = aio_poll(req, &iocb);
- break;
default:
pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
ret = -EINVAL;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cce6087d6880..e55843f536bc 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4542,8 +4542,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
offset_in_extent = em_start - em->start;
em_end = extent_map_end(em);
em_len = em_end - em_start;
- disko = em->block_start + offset_in_extent;
flags = 0;
+ if (em->block_start < EXTENT_MAP_LAST_BYTE)
+ disko = em->block_start + offset_in_extent;
+ else
+ disko = 0;
/*
* bump off for our next call to get_extent
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e9482f0db9d0..eba61bcb9bb3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9005,13 +9005,14 @@ again:
unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
-out_unlock:
if (!ret2) {
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
sb_end_pagefault(inode->i_sb);
extent_changeset_free(data_reserved);
return VM_FAULT_LOCKED;
}
+
+out_unlock:
unlock_page(page);
out:
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
@@ -9443,6 +9444,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
u64 new_idx = 0;
u64 root_objectid;
int ret;
+ int ret2;
bool root_log_pinned = false;
bool dest_log_pinned = false;
@@ -9639,7 +9641,8 @@ out_fail:
dest_log_pinned = false;
}
}
- ret = btrfs_end_transaction(trans);
+ ret2 = btrfs_end_transaction(trans);
+ ret = ret ? ret : ret2;
out_notrans:
if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
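
The ret/ret2 change in btrfs_rename_exchange() above makes sure btrfs_end_transaction() still runs on every path but can never mask an earlier failure. A minimal, self-contained sketch of that "first error wins" pattern; the helpers below are hypothetical stand-ins, not btrfs functions:

/* Sketch: cleanup always runs, but its return value only replaces
 * ret when no earlier error occurred. */
#include <stdio.h>

static int do_rename_work(void)  { return -5; } /* pretend the work failed */
static int end_transaction(void) { return 0; }  /* stand-in for btrfs_end_transaction() */

int main(void)
{
	int ret, ret2;

	ret  = do_rename_work();
	ret2 = end_transaction();     /* must run regardless of ret */
	ret  = ret ? ret : ret2;      /* keep the first error */
	printf("result: %d\n", ret);  /* prints -5, not 0 */
	return 0;
}
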
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c2837a32d689..43ecbe620dea 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3577,7 +3577,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
dst, dst_loff, &cmp);
if (ret)
- goto out_unlock;
+ goto out_free;
loff += BTRFS_MAX_DEDUPE_LEN;
dst_loff += BTRFS_MAX_DEDUPE_LEN;
@@ -3587,16 +3587,16 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
ret = btrfs_extent_same_range(src, loff, tail_len, dst,
dst_loff, &cmp);
+out_free:
+ kvfree(cmp.src_pages);
+ kvfree(cmp.dst_pages);
+
out_unlock:
if (same_inode)
inode_unlock(src);
else
btrfs_double_inode_unlock(src, dst);
-out_free:
- kvfree(cmp.src_pages);
- kvfree(cmp.dst_pages);
-
return ret;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 1874a6d2e6f5..c25dc47210a3 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2680,8 +2680,10 @@ out:
free_extent_buffer(scratch_leaf);
}
- if (done && !ret)
+ if (done && !ret) {
ret = 1;
+ fs_info->qgroup_rescan_progress.objectid = (u64)-1;
+ }
return ret;
}
@@ -2784,13 +2786,20 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
if (!init_flags) {
/* we're resuming qgroup rescan at mount time */
- if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN))
+ if (!(fs_info->qgroup_flags &
+ BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
- else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+ ret = -EINVAL;
+ } else if (!(fs_info->qgroup_flags &
+ BTRFS_QGROUP_STATUS_FLAG_ON)) {
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup rescan is not queued");
- return -EINVAL;
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
}
mutex_lock(&fs_info->qgroup_rescan_lock);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ee764ac352ab..a866be999216 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1135,6 +1135,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
if (IS_ERR(realdn)) {
pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
PTR_ERR(realdn), dn, in, ceph_vinop(in));
+ dput(dn);
dn = realdn; /* note realdn contains the error */
goto out;
} else if (realdn) {
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 116146022aa1..bfe999505815 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -126,6 +126,25 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
seq_putc(m, '\n');
}
+static void
+cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
+{
+ struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+ struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+
+ seq_printf(m, "\t\tSpeed: %zu bps\n", iface->speed);
+ seq_puts(m, "\t\tCapabilities: ");
+ if (iface->rdma_capable)
+ seq_puts(m, "rdma ");
+ if (iface->rss_capable)
+ seq_puts(m, "rss ");
+ seq_putc(m, '\n');
+ if (iface->sockaddr.ss_family == AF_INET)
+ seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
+ else if (iface->sockaddr.ss_family == AF_INET6)
+ seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
+}
+
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
{
struct list_head *tmp1, *tmp2, *tmp3;
@@ -312,6 +331,16 @@ skip_rdma:
mid_entry->mid);
}
spin_unlock(&GlobalMid_Lock);
+
+ spin_lock(&ses->iface_lock);
+ if (ses->iface_count)
+ seq_printf(m, "\n\tServer interfaces: %zu\n",
+ ses->iface_count);
+ for (j = 0; j < ses->iface_count; j++) {
+ seq_printf(m, "\t%d)\n", j);
+ cifs_dump_iface(m, &ses->iface_list[j]);
+ }
+ spin_unlock(&ses->iface_lock);
}
}
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 937251cc61c0..ee2a8ec70056 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -37,7 +37,6 @@
#include <crypto/aead.h>
int __cifs_calc_signature(struct smb_rqst *rqst,
- int start,
struct TCP_Server_Info *server, char *signature,
struct shash_desc *shash)
{
@@ -45,16 +44,27 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
int rc;
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
+ int is_smb2 = server->vals->header_preamble_size == 0;
- for (i = start; i < n_vec; i++) {
+ /* iov[0] is actual data and not the rfc1002 length for SMB2+ */
+ if (is_smb2) {
+ if (iov[0].iov_len <= 4)
+ return -EIO;
+ i = 0;
+ } else {
+ if (n_vec < 2 || iov[0].iov_len != 4)
+ return -EIO;
+ i = 1; /* skip rfc1002 length */
+ }
+
+ for (; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
cifs_dbg(VFS, "null iovec entry\n");
return -EIO;
}
- if (i == 1 && iov[1].iov_len <= 4)
- break; /* nothing to sign or corrupt header */
+
rc = crypto_shash_update(shash,
iov[i].iov_base, iov[i].iov_len);
if (rc) {
@@ -118,7 +128,7 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
return rc;
}
- return __cifs_calc_signature(rqst, 1, server, signature,
+ return __cifs_calc_signature(rqst, server, signature,
&server->secmech.sdescmd5->shash);
}
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1efa2e65bc1a..bd78da59a4fd 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -33,6 +33,9 @@
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
+#define CIFS_PORT 445
+#define RFC1001_PORT 139
+
/*
* The sizes of various internal tables and strings
*/
@@ -312,6 +315,10 @@ struct smb_version_operations {
/* send echo request */
int (*echo)(struct TCP_Server_Info *);
/* create directory */
+ int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
+ umode_t mode, struct cifs_tcon *tcon,
+ const char *full_path,
+ struct cifs_sb_info *cifs_sb);
int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
struct cifs_sb_info *);
/* set info on created directory */
@@ -838,6 +845,13 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
#endif
+struct cifs_server_iface {
+ size_t speed;
+ unsigned int rdma_capable : 1;
+ unsigned int rss_capable : 1;
+ struct sockaddr_storage sockaddr;
+};
+
/*
* Session structure. One of these for each uid session with a particular host
*/
@@ -875,6 +889,20 @@ struct cifs_ses {
#ifdef CONFIG_CIFS_SMB311
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
#endif /* 3.1.1 */
+
+ /*
+ * Network interfaces available on the server this session is
+ * connected to.
+ *
+ * Other channels can be opened by connecting and binding this
+ * session to interfaces from this list.
+ *
+ * iface_lock should be taken when accessing any of these fields
+ */
+ spinlock_t iface_lock;
+ struct cifs_server_iface *iface_list;
+ size_t iface_count;
+ unsigned long iface_last_update; /* jiffies */
};
static inline bool
@@ -883,6 +911,14 @@ cap_unix(struct cifs_ses *ses)
return ses->server->vals->cap_unix & ses->capabilities;
}
+struct cached_fid {
+ bool is_valid:1; /* Do we have a useable root fid */
+ struct cifs_fid *fid;
+ struct mutex fid_mutex;
+ struct cifs_tcon *tcon;
+ struct work_struct lease_break;
+};
+
/*
* there is one of these for each connection to a resource on a particular
* session
@@ -987,9 +1023,7 @@ struct cifs_tcon {
struct fscache_cookie *fscache; /* cookie for share */
#endif
struct list_head pending_opens; /* list of incomplete opens */
- bool valid_root_fid:1; /* Do we have a useable root fid */
- struct mutex prfid_mutex; /* prevents reopen race after dead ses*/
- struct cifs_fid *prfid; /* handle to the directory at top of share */
+ struct cached_fid crfid; /* Cached root fid */
/* BB add field for back pointer to sb struct(s)? */
};
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 4e0d183c3d10..03018be17283 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -112,10 +112,6 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */, const int flags,
struct kvec * /* resp vec */);
-extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses,
- struct kvec *pkvec, int nvec_to_send,
- int *pbuftype, const int flags,
- struct kvec *presp);
extern int SendReceiveBlockingLock(const unsigned int xid,
struct cifs_tcon *ptcon,
struct smb_hdr *in_buf ,
@@ -544,7 +540,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const unsigned char *path, char *pbuf,
unsigned int *pbytes_written);
-int __cifs_calc_signature(struct smb_rqst *rqst, int start,
+int __cifs_calc_signature(struct smb_rqst *rqst,
struct TCP_Server_Info *server, char *signature,
struct shash_desc *shash);
enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
@@ -552,6 +548,7 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
void cifs_aio_ctx_release(struct kref *refcount);
int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+void smb2_cached_lease_break(struct work_struct *work);
int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
struct sdesc **sdesc);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 42329b25877d..d352da325de3 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -107,10 +107,10 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
}
spin_unlock(&tcon->open_file_lock);
- mutex_lock(&tcon->prfid_mutex);
- tcon->valid_root_fid = false;
- memset(tcon->prfid, 0, sizeof(struct cifs_fid));
- mutex_unlock(&tcon->prfid_mutex);
+ mutex_lock(&tcon->crfid.fid_mutex);
+ tcon->crfid.is_valid = false;
+ memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
+ mutex_unlock(&tcon->crfid.fid_mutex);
/*
* BB Add call to invalidate_inodes(sb) for all superblocks mounted
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 96645a7d8f27..a57da1b88bdf 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -57,9 +57,6 @@
#include "smb2proto.h"
#include "smbdirect.h"
-#define CIFS_PORT 445
-#define RFC1001_PORT 139
-
extern mempool_t *cifs_req_poolp;
extern bool disable_legacy_dialects;
@@ -3029,8 +3026,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
#ifdef CONFIG_CIFS_SMB311
if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
- if (ses->server->vals->protocol_id == SMB311_PROT_ID)
+ if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
tcon->posix_extensions = true;
+ printk_once(KERN_WARNING
+ "SMB3.11 POSIX Extensions are experimental\n");
+ }
}
#endif /* 311 */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f4697f548a39..a2cfb33e85c1 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1575,6 +1575,17 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
goto mkdir_out;
}
+ server = tcon->ses->server;
+
+#ifdef CONFIG_CIFS_SMB311
+ if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
+ rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
+ cifs_sb);
+ d_drop(direntry); /* for time being always refresh inode info */
+ goto mkdir_out;
+ }
+#endif /* SMB311 */
+
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
@@ -1583,8 +1594,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
goto mkdir_out;
}
- server = tcon->ses->server;
-
if (!server->ops->mkdir) {
rc = -ENOSYS;
goto mkdir_out;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index af29ade195c0..53e8362cbc4a 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -82,6 +82,7 @@ sesInfoAlloc(void)
INIT_LIST_HEAD(&ret_buf->smb_ses_list);
INIT_LIST_HEAD(&ret_buf->tcon_list);
mutex_init(&ret_buf->session_mutex);
+ spin_lock_init(&ret_buf->iface_lock);
}
return ret_buf;
}
@@ -102,6 +103,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
kzfree(buf_to_free->auth_key.response);
+ kfree(buf_to_free->iface_list);
kzfree(buf_to_free);
}
@@ -117,8 +119,9 @@ tconInfoAlloc(void)
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
spin_lock_init(&ret_buf->open_file_lock);
- mutex_init(&ret_buf->prfid_mutex);
- ret_buf->prfid = kzalloc(sizeof(struct cifs_fid), GFP_KERNEL);
+ mutex_init(&ret_buf->crfid.fid_mutex);
+ ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
+ GFP_KERNEL);
#ifdef CONFIG_CIFS_STATS
spin_lock_init(&ret_buf->stat_lock);
#endif
@@ -136,7 +139,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
atomic_dec(&tconInfoAllocCount);
kfree(buf_to_free->nativeFileSystem);
kzfree(buf_to_free->password);
- kfree(buf_to_free->prfid);
+ kfree(buf_to_free->crfid.fid);
kfree(buf_to_free);
}
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index e2bec47c6845..3ff7cec2da81 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -454,7 +454,8 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
#ifdef CONFIG_CIFS_SMB311
/* SMB311 POSIX extensions paths do not include leading slash */
else if (cifs_sb_master_tlink(cifs_sb) &&
- cifs_sb_master_tcon(cifs_sb)->posix_extensions) {
+ cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
+ (from[0] == '/')) {
start_of_path = from + 1;
}
#endif /* 311 */
@@ -492,10 +493,11 @@ cifs_ses_oplock_break(struct work_struct *work)
{
struct smb2_lease_break_work *lw = container_of(work,
struct smb2_lease_break_work, lease_break);
- int rc;
+ int rc = 0;
rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
lw->lease_state);
+
cifs_dbg(FYI, "Lease release rc %d\n", rc);
cifs_put_tlink(lw->tlink);
kfree(lw);
@@ -561,6 +563,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
open->oplock = lease_state;
}
+
return found;
}
@@ -603,6 +606,18 @@ smb2_is_valid_lease_break(char *buffer)
return true;
}
spin_unlock(&tcon->open_file_lock);
+
+ if (tcon->crfid.is_valid &&
+ !memcmp(rsp->LeaseKey,
+ tcon->crfid.fid->lease_key,
+ SMB2_LEASE_KEY_SIZE)) {
+ INIT_WORK(&tcon->crfid.lease_break,
+ smb2_cached_lease_break);
+ queue_work(cifsiod_wq,
+ &tcon->crfid.lease_break);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
}
}
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index b15f5957d645..0356b5559c71 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -294,34 +294,191 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
return rsize;
}
-#ifdef CONFIG_CIFS_STATS2
+
+static int
+parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ size_t buf_len,
+ struct cifs_server_iface **iface_list,
+ size_t *iface_count)
+{
+ struct network_interface_info_ioctl_rsp *p;
+ struct sockaddr_in *addr4;
+ struct sockaddr_in6 *addr6;
+ struct iface_info_ipv4 *p4;
+ struct iface_info_ipv6 *p6;
+ struct cifs_server_iface *info;
+ ssize_t bytes_left;
+ size_t next = 0;
+ int nb_iface = 0;
+ int rc = 0;
+
+ *iface_list = NULL;
+ *iface_count = 0;
+
+ /*
+	 * First pass: count and sanity check
+ */
+
+ bytes_left = buf_len;
+ p = buf;
+ while (bytes_left >= sizeof(*p)) {
+ nb_iface++;
+ next = le32_to_cpu(p->Next);
+ if (!next) {
+ bytes_left -= sizeof(*p);
+ break;
+ }
+ p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+ bytes_left -= next;
+ }
+
+ if (!nb_iface) {
+ cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (bytes_left || p->Next)
+ cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+
+
+ /*
+ * Second pass: extract info to internal structure
+ */
+
+ *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
+ if (!*iface_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ info = *iface_list;
+ bytes_left = buf_len;
+ p = buf;
+ while (bytes_left >= sizeof(*p)) {
+ info->speed = le64_to_cpu(p->LinkSpeed);
+ info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
+ info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+
+ cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+ cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+ cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
+ le32_to_cpu(p->Capability));
+
+ switch (p->Family) {
+ /*
+ * The kernel and wire socket structures have the same
+ * layout and use network byte order but make the
+ * conversion explicit in case either one changes.
+ */
+ case INTERNETWORK:
+ addr4 = (struct sockaddr_in *)&info->sockaddr;
+ p4 = (struct iface_info_ipv4 *)p->Buffer;
+ addr4->sin_family = AF_INET;
+ memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
+
+ /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
+ addr4->sin_port = cpu_to_be16(CIFS_PORT);
+
+ cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
+ &addr4->sin_addr);
+ break;
+ case INTERNETWORKV6:
+ addr6 = (struct sockaddr_in6 *)&info->sockaddr;
+ p6 = (struct iface_info_ipv6 *)p->Buffer;
+ addr6->sin6_family = AF_INET6;
+ memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
+
+ /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
+ addr6->sin6_flowinfo = 0;
+ addr6->sin6_scope_id = 0;
+ addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+
+ cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
+ &addr6->sin6_addr);
+ break;
+ default:
+ cifs_dbg(VFS,
+ "%s: skipping unsupported socket family\n",
+ __func__);
+ goto next_iface;
+ }
+
+ (*iface_count)++;
+ info++;
+next_iface:
+ next = le32_to_cpu(p->Next);
+ if (!next)
+ break;
+ p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+ bytes_left -= next;
+ }
+
+ if (!*iface_count) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ if (rc) {
+ kfree(*iface_list);
+ *iface_count = 0;
+ *iface_list = NULL;
+ }
+ return rc;
+}
+
+
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
int rc;
unsigned int ret_data_len = 0;
- struct network_interface_info_ioctl_rsp *out_buf;
+ struct network_interface_info_ioctl_rsp *out_buf = NULL;
+ struct cifs_server_iface *iface_list;
+ size_t iface_count;
+ struct cifs_ses *ses = tcon->ses;
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
NULL /* no data input */, 0 /* no data input */,
(char **)&out_buf, &ret_data_len);
- if (rc != 0)
+ if (rc != 0) {
cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
- else if (ret_data_len < sizeof(struct network_interface_info_ioctl_rsp)) {
- cifs_dbg(VFS, "server returned bad net interface info buf\n");
- rc = -EINVAL;
- } else {
- /* Dump info on first interface */
- cifs_dbg(FYI, "Adapter Capability 0x%x\t",
- le32_to_cpu(out_buf->Capability));
- cifs_dbg(FYI, "Link Speed %lld\n",
- le64_to_cpu(out_buf->LinkSpeed));
+ goto out;
}
+
+ rc = parse_server_interfaces(out_buf, ret_data_len,
+ &iface_list, &iface_count);
+ if (rc)
+ goto out;
+
+ spin_lock(&ses->iface_lock);
+ kfree(ses->iface_list);
+ ses->iface_list = iface_list;
+ ses->iface_count = iface_count;
+ ses->iface_last_update = jiffies;
+ spin_unlock(&ses->iface_lock);
+
+out:
kfree(out_buf);
return rc;
}
-#endif /* STATS2 */
+
+void
+smb2_cached_lease_break(struct work_struct *work)
+{
+ struct cached_fid *cfid = container_of(work,
+ struct cached_fid, lease_break);
+ mutex_lock(&cfid->fid_mutex);
+ if (cfid->is_valid) {
+ cifs_dbg(FYI, "clear cached root file handle\n");
+ SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
+ cfid->fid->volatile_fid);
+ cfid->is_valid = false;
+ }
+ mutex_unlock(&cfid->fid_mutex);
+}
/*
* Open the directory at the root of a share
@@ -331,13 +488,13 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
struct cifs_open_parms oparams;
int rc;
__le16 srch_path = 0; /* Null - since an open of top of share */
- u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ u8 oplock = SMB2_OPLOCK_LEVEL_II;
- mutex_lock(&tcon->prfid_mutex);
- if (tcon->valid_root_fid) {
+ mutex_lock(&tcon->crfid.fid_mutex);
+ if (tcon->crfid.is_valid) {
cifs_dbg(FYI, "found a cached root file handle\n");
- memcpy(pfid, tcon->prfid, sizeof(struct cifs_fid));
- mutex_unlock(&tcon->prfid_mutex);
+ memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+ mutex_unlock(&tcon->crfid.fid_mutex);
return 0;
}
@@ -350,10 +507,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
if (rc == 0) {
- memcpy(tcon->prfid, pfid, sizeof(struct cifs_fid));
- tcon->valid_root_fid = true;
+ memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
+ tcon->crfid.tcon = tcon;
+ tcon->crfid.is_valid = true;
}
- mutex_unlock(&tcon->prfid_mutex);
+ mutex_unlock(&tcon->crfid.fid_mutex);
return rc;
}
@@ -383,9 +541,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
if (rc)
return;
-#ifdef CONFIG_CIFS_STATS2
SMB3_request_interfaces(xid, tcon);
-#endif /* STATS2 */
SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
FS_ATTRIBUTE_INFORMATION);
@@ -436,7 +592,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct cifs_fid fid;
- if ((*full_path == 0) && tcon->valid_root_fid)
+ if ((*full_path == 0) && tcon->crfid.is_valid)
return 0;
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
@@ -2151,7 +2307,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
struct smb_rqst *old_rq)
{
struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)old_rq->rq_iov[1].iov_base;
+ (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
@@ -2171,14 +2327,13 @@ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
}
/* Assumes:
- * rqst->rq_iov[0] is rfc1002 length
- * rqst->rq_iov[1] is tranform header
- * rqst->rq_iov[2+] data to be encrypted/decrypted
+ * rqst->rq_iov[0] is transform header
+ * rqst->rq_iov[1+] data to be encrypted/decrypted
*/
static struct scatterlist *
init_sg(struct smb_rqst *rqst, u8 *sign)
{
- unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages;
+ unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1;
unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
struct scatterlist *sg;
unsigned int i;
@@ -2189,10 +2344,10 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
return NULL;
sg_init_table(sg, sg_len);
- smb2_sg_set_buf(&sg[0], rqst->rq_iov[1].iov_base + 20, assoc_data_len);
- for (i = 1; i < rqst->rq_nvec - 1; i++)
- smb2_sg_set_buf(&sg[i], rqst->rq_iov[i+1].iov_base,
- rqst->rq_iov[i+1].iov_len);
+ smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 20, assoc_data_len);
+ for (i = 1; i < rqst->rq_nvec; i++)
+ smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
+ rqst->rq_iov[i].iov_len);
for (j = 0; i < sg_len - 1; i++, j++) {
unsigned int len, offset;
@@ -2224,18 +2379,17 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
return 1;
}
/*
- * Encrypt or decrypt @rqst message. @rqst has the following format:
- * iov[0] - rfc1002 length
- * iov[1] - transform header (associate data),
- * iov[2-N] and pages - data to encrypt.
- * On success return encrypted data in iov[2-N] and pages, leave iov[0-1]
+ * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
+ * iov[0] - transform header (associate data),
+ * iov[1-N] - SMB2 header and pages - data to encrypt.
+ * On success return encrypted data in iov[1-N] and pages, leave iov[0]
* untouched.
*/
static int
crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
{
struct smb2_transform_hdr *tr_hdr =
- (struct smb2_transform_hdr *)rqst->rq_iov[1].iov_base;
+ (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int rc = 0;
struct scatterlist *sg;
@@ -2323,10 +2477,6 @@ free_req:
return rc;
}
-/*
- * This is called from smb_send_rqst. At this point we have the rfc1002
- * header as the first element in the vector.
- */
static int
smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
struct smb_rqst *old_rq)
@@ -2335,7 +2485,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
struct page **pages;
struct smb2_transform_hdr *tr_hdr;
unsigned int npages = old_rq->rq_npages;
- unsigned int orig_len = get_rfc1002_length(old_rq->rq_iov[0].iov_base);
+ unsigned int orig_len;
int i;
int rc = -ENOMEM;
@@ -2355,18 +2505,14 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
goto err_free_pages;
}
- /* Make space for one extra iov to hold the transform header */
iov = kmalloc_array(old_rq->rq_nvec + 1, sizeof(struct kvec),
GFP_KERNEL);
if (!iov)
goto err_free_pages;
- /* copy all iovs from the old except the 1st one (rfc1002 length) */
- memcpy(&iov[2], &old_rq->rq_iov[1],
- sizeof(struct kvec) * (old_rq->rq_nvec - 1));
- /* copy the rfc1002 iov */
- iov[0].iov_base = old_rq->rq_iov[0].iov_base;
- iov[0].iov_len = old_rq->rq_iov[0].iov_len;
+ /* copy all iovs from the old */
+ memcpy(&iov[1], &old_rq->rq_iov[0],
+ sizeof(struct kvec) * old_rq->rq_nvec);
new_rq->rq_iov = iov;
new_rq->rq_nvec = old_rq->rq_nvec + 1;
@@ -2375,14 +2521,12 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
if (!tr_hdr)
goto err_free_iov;
+ orig_len = smb2_rqst_len(old_rq, false);
+
/* fill the 2nd iov with a transform header */
fill_transform_hdr(tr_hdr, orig_len, old_rq);
- new_rq->rq_iov[1].iov_base = tr_hdr;
- new_rq->rq_iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-
- /* Update rfc1002 header */
- inc_rfc1001_len(new_rq->rq_iov[0].iov_base,
- sizeof(struct smb2_transform_hdr));
+ new_rq->rq_iov[0].iov_base = tr_hdr;
+ new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr);
/* copy pages form the old */
for (i = 0; i < npages; i++) {
@@ -2426,7 +2570,7 @@ smb3_free_transform_rq(struct smb_rqst *rqst)
put_page(rqst->rq_pages[i]);
kfree(rqst->rq_pages);
/* free transform header */
- kfree(rqst->rq_iov[1].iov_base);
+ kfree(rqst->rq_iov[0].iov_base);
kfree(rqst->rq_iov);
}
@@ -2443,19 +2587,17 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
unsigned int buf_data_size, struct page **pages,
unsigned int npages, unsigned int page_data_size)
{
- struct kvec iov[3];
+ struct kvec iov[2];
struct smb_rqst rqst = {NULL};
int rc;
- iov[0].iov_base = NULL;
- iov[0].iov_len = 0;
- iov[1].iov_base = buf;
- iov[1].iov_len = sizeof(struct smb2_transform_hdr);
- iov[2].iov_base = buf + sizeof(struct smb2_transform_hdr);
- iov[2].iov_len = buf_data_size;
+ iov[0].iov_base = buf;
+ iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+ iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+ iov[1].iov_len = buf_data_size;
rqst.rq_iov = iov;
- rqst.rq_nvec = 3;
+ rqst.rq_nvec = 2;
rqst.rq_pages = pages;
rqst.rq_npages = npages;
rqst.rq_pagesz = PAGE_SIZE;
@@ -2467,7 +2609,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
if (rc)
return rc;
- memmove(buf, iov[2].iov_base, buf_data_size);
+ memmove(buf, iov[1].iov_base, buf_data_size);
server->total_read = buf_data_size + page_data_size;
@@ -3170,6 +3312,7 @@ struct smb_version_operations smb311_operations = {
.set_compression = smb2_set_compression,
.mkdir = smb2_mkdir,
.mkdir_setinfo = smb2_mkdir_setinfo,
+ .posix_mkdir = smb311_posix_mkdir,
.rmdir = smb2_rmdir,
.unlink = smb2_unlink,
.rename = smb2_rename_path,
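
parse_server_interfaces() above walks the FSCTL_QUERY_NETWORK_INTERFACE_INFO response twice: a first pass counts the Next-chained records and sanity-checks the chain, a second pass allocates once and copies out the interesting fields. A condensed userspace-style sketch of that two-pass walk, assuming a simplified record layout; struct rec and parse_recs() are stand-ins, not the SMB2 wire format:

/* Two-pass walk over records chained by a byte offset ('next', 0 ends
 * the chain): count and bounds-check first, then allocate and extract. */
#include <stdlib.h>

struct rec {
	unsigned int next;      /* byte offset to the next record, 0 = last */
	unsigned long speed;    /* example payload field */
};

static int parse_recs(const void *buf, size_t len,
		      unsigned long **speeds, size_t *count)
{
	const struct rec *p = buf;
	size_t left = len, n = 0, i;

	/* first pass: count records without trusting the buffer */
	while (left >= sizeof(*p)) {
		n++;
		if (!p->next)
			break;
		if (p->next > left)
			return -1;      /* malformed chain */
		left -= p->next;
		p = (const struct rec *)((const char *)p + p->next);
	}
	if (!n)
		return -1;

	/* second pass: allocate once, then copy the fields we care about */
	*speeds = calloc(n, sizeof(**speeds));
	if (!*speeds)
		return -1;
	p = buf;
	for (i = 0; i < n; i++) {
		(*speeds)[i] = p->speed;
		p = (const struct rec *)((const char *)p + p->next);
	}
	*count = n;
	return 0;
}
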
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index af032e1a3eac..810b85787c91 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -602,6 +602,7 @@ static void assemble_neg_contexts(struct smb2_negotiate_req *req,
int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
+ struct smb_rqst rqst;
struct smb2_negotiate_req *req;
struct smb2_negotiate_rsp *rsp;
struct kvec iov[1];
@@ -673,7 +674,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
/*
@@ -990,8 +995,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
req->PreviousSessionId = sess_data->previous_session;
req->Flags = 0; /* MBZ */
- /* to enable echos and oplocks */
- req->sync_hdr.CreditRequest = cpu_to_le16(3);
+
+ /* enough to enable echos and oplocks and one max size write */
+ req->sync_hdr.CreditRequest = cpu_to_le16(130);
/* only one of SMB2 signing flags may be set in SMB2 request */
if (server->sign)
@@ -1027,6 +1033,7 @@ static int
SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
{
int rc;
+ struct smb_rqst rqst;
struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
struct kvec rsp_iov = { NULL, 0 };
@@ -1035,10 +1042,13 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
- /* BB add code to build os and lm fields */
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = sess_data->iov;
+ rqst.rq_nvec = 2;
- rc = smb2_send_recv(sess_data->xid, sess_data->ses,
- sess_data->iov, 2,
+ /* BB add code to build os and lm fields */
+ rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+ &rqst,
&sess_data->buf0_type,
CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
cifs_small_buf_release(sess_data->iov[0].iov_base);
@@ -1376,6 +1386,7 @@ out:
int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
+ struct smb_rqst rqst;
struct smb2_logoff_req *req; /* response is also trivial struct */
int rc = 0;
struct TCP_Server_Info *server;
@@ -1413,7 +1424,11 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
/*
* No tcon so can't do
@@ -1443,6 +1458,7 @@ int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
struct cifs_tcon *tcon, const struct nls_table *cp)
{
+ struct smb_rqst rqst;
struct smb2_tree_connect_req *req;
struct smb2_tree_connect_rsp *rsp = NULL;
struct kvec iov[2];
@@ -1499,7 +1515,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
!smb3_encryption_required(tcon))
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
- rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 2;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
@@ -1563,6 +1583,7 @@ tcon_error_exit:
int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
+ struct smb_rqst rqst;
struct smb2_tree_disconnect_req *req; /* response is trivial */
int rc = 0;
struct cifs_ses *ses = tcon->ses;
@@ -1593,7 +1614,11 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc)
cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -1886,11 +1911,165 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
return 0;
}
+#ifdef CONFIG_CIFS_SMB311
+int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ umode_t mode, struct cifs_tcon *tcon,
+ const char *full_path,
+ struct cifs_sb_info *cifs_sb)
+{
+ struct smb_rqst rqst;
+ struct smb2_create_req *req;
+ struct smb2_create_rsp *rsp;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses = tcon->ses;
+ struct kvec iov[3]; /* make sure at least one for each open context */
+ struct kvec rsp_iov = {NULL, 0};
+ int resp_buftype;
+ int uni_path_len;
+ __le16 *copy_path = NULL;
+ int copy_size;
+ int rc = 0;
+ unsigned int n_iov = 2;
+ __u32 file_attributes = 0;
+ char *pc_buf = NULL;
+ int flags = 0;
+ unsigned int total_len;
+ __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+
+ if (!path)
+ return -ENOMEM;
+
+ cifs_dbg(FYI, "mkdir\n");
+
+ if (ses && (ses->server))
+ server = ses->server;
+ else
+ return -EIO;
+
+ rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+
+ if (rc)
+ return rc;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+
+ req->ImpersonationLevel = IL_IMPERSONATION;
+ req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+ /* File attributes ignored on open (used in create though) */
+ req->FileAttributes = cpu_to_le32(file_attributes);
+ req->ShareAccess = FILE_SHARE_ALL_LE;
+ req->CreateDisposition = cpu_to_le32(FILE_CREATE);
+ req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
+
+ iov[0].iov_base = (char *)req;
+ /* -1 since last byte is buf[0] which is sent below (path) */
+ iov[0].iov_len = total_len - 1;
+
+ req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
+
+ /* [MS-SMB2] 2.2.13 NameOffset:
+ * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
+ * the SMB2 header, the file name includes a prefix that will
+ * be processed during DFS name normalization as specified in
+ * section 3.3.5.9. Otherwise, the file name is relative to
+ * the share that is identified by the TreeId in the SMB2
+ * header.
+ */
+ if (tcon->share_flags & SHI1005_FLAGS_DFS) {
+ int name_len;
+
+ req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+ rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+ &name_len,
+ tcon->treeName, path);
+ if (rc) {
+ cifs_small_buf_release(req);
+ return rc;
+ }
+ req->NameLength = cpu_to_le16(name_len * 2);
+ uni_path_len = copy_size;
+ path = copy_path;
+ } else {
+ uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+ /* MUST set path len (NameLength) to 0 opening root of share */
+ req->NameLength = cpu_to_le16(uni_path_len - 2);
+ if (uni_path_len % 8 != 0) {
+ copy_size = roundup(uni_path_len, 8);
+ copy_path = kzalloc(copy_size, GFP_KERNEL);
+ if (!copy_path) {
+ cifs_small_buf_release(req);
+ return -ENOMEM;
+ }
+ memcpy((char *)copy_path, (const char *)path,
+ uni_path_len);
+ uni_path_len = copy_size;
+ path = copy_path;
+ }
+ }
+
+ iov[1].iov_len = uni_path_len;
+ iov[1].iov_base = path;
+ req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+
+ if (tcon->posix_extensions) {
+ if (n_iov > 2) {
+ struct create_context *ccontext =
+ (struct create_context *)iov[n_iov-1].iov_base;
+ ccontext->Next =
+ cpu_to_le32(iov[n_iov-1].iov_len);
+ }
+
+ rc = add_posix_context(iov, &n_iov, mode);
+ if (rc) {
+ cifs_small_buf_release(req);
+ kfree(copy_path);
+ return rc;
+ }
+ pc_buf = iov[n_iov-1].iov_base;
+ }
+
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_iov;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+ &rsp_iov);
+
+ cifs_small_buf_release(req);
+ rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+
+ if (rc != 0) {
+ cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+ trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+ CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
+ goto smb311_mkdir_exit;
+ } else
+ trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+ ses->Suid, CREATE_NOT_FILE,
+ FILE_WRITE_ATTRIBUTES);
+
+ SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+
+	/* Eventually save off posix specific response info and timestamps */
+
+smb311_mkdir_exit:
+ kfree(copy_path);
+ kfree(pc_buf);
+ free_rsp_buf(resp_buftype, rsp);
+ return rc;
+
+}
+#endif /* SMB311 */
+
int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
__u8 *oplock, struct smb2_file_all_info *buf,
struct kvec *err_iov, int *buftype)
{
+ struct smb_rqst rqst;
struct smb2_create_req *req;
struct smb2_create_rsp *rsp;
struct TCP_Server_Info *server;
@@ -2043,7 +2222,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
}
#endif /* SMB311 */
- rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_iov;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2099,6 +2282,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
char *in_data, u32 indatalen,
char **out_data, u32 *plen /* returned data len */)
{
+ struct smb_rqst rqst;
struct smb2_ioctl_req *req;
struct smb2_ioctl_rsp *rsp;
struct cifs_ses *ses;
@@ -2189,7 +2373,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
- rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_iov;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -2274,6 +2462,7 @@ int
SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int flags)
{
+ struct smb_rqst rqst;
struct smb2_close_req *req;
struct smb2_close_rsp *rsp;
struct cifs_ses *ses = tcon->ses;
@@ -2301,7 +2490,11 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
@@ -2387,6 +2580,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
u32 additional_info, size_t output_len, size_t min_len, void **data,
u32 *dlen)
{
+ struct smb_rqst rqst;
struct smb2_query_info_req *req;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov[2];
@@ -2427,7 +2621,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
@@ -2594,11 +2792,10 @@ SMB2_echo(struct TCP_Server_Info *server)
{
struct smb2_echo_req *req;
int rc = 0;
- struct kvec iov[2];
+ struct kvec iov[1];
struct smb_rqst rqst = { .rq_iov = iov,
- .rq_nvec = 2 };
+ .rq_nvec = 1 };
unsigned int total_len;
- __be32 rfc1002_marker;
cifs_dbg(FYI, "In echo request\n");
@@ -2614,11 +2811,8 @@ SMB2_echo(struct TCP_Server_Info *server)
req->sync_hdr.CreditRequest = cpu_to_le16(1);
- iov[0].iov_len = 4;
- rfc1002_marker = cpu_to_be32(total_len);
- iov[0].iov_base = &rfc1002_marker;
- iov[1].iov_len = total_len;
- iov[1].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+ iov[0].iov_base = (char *)req;
rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
server, CIFS_ECHO_OP);
@@ -2633,6 +2827,7 @@ int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
u64 volatile_fid)
{
+ struct smb_rqst rqst;
struct smb2_flush_req *req;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
@@ -2660,7 +2855,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc != 0) {
@@ -2848,10 +3047,9 @@ smb2_async_readv(struct cifs_readdata *rdata)
struct smb2_sync_hdr *shdr;
struct cifs_io_parms io_parms;
struct smb_rqst rqst = { .rq_iov = rdata->iov,
- .rq_nvec = 2 };
+ .rq_nvec = 1 };
struct TCP_Server_Info *server;
unsigned int total_len;
- __be32 req_len;
cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
__func__, rdata->offset, rdata->bytes);
@@ -2882,12 +3080,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
if (smb3_encryption_required(io_parms.tcon))
flags |= CIFS_TRANSFORM_REQ;
- req_len = cpu_to_be32(total_len);
-
- rdata->iov[0].iov_base = &req_len;
- rdata->iov[0].iov_len = sizeof(__be32);
- rdata->iov[1].iov_base = buf;
- rdata->iov[1].iov_len = total_len;
+ rdata->iov[0].iov_base = buf;
+ rdata->iov[0].iov_len = total_len;
shdr = (struct smb2_sync_hdr *)buf;
@@ -2926,6 +3120,7 @@ int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, char **buf, int *buf_type)
{
+ struct smb_rqst rqst;
int resp_buftype, rc = -EACCES;
struct smb2_read_plain_req *req = NULL;
struct smb2_read_rsp *rsp = NULL;
@@ -2946,7 +3141,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
@@ -3062,10 +3261,9 @@ smb2_async_writev(struct cifs_writedata *wdata,
struct smb2_sync_hdr *shdr;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
- struct kvec iov[2];
+ struct kvec iov[1];
struct smb_rqst rqst = { };
unsigned int total_len;
- __be32 rfc1002_marker;
rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
if (rc) {
@@ -3137,15 +3335,11 @@ smb2_async_writev(struct cifs_writedata *wdata,
v1->length = cpu_to_le32(wdata->mr->mr->length);
}
#endif
- /* 4 for rfc1002 length field and 1 for Buffer */
- iov[0].iov_len = 4;
- rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes);
- iov[0].iov_base = &rfc1002_marker;
- iov[1].iov_len = total_len - 1;
- iov[1].iov_base = (char *)req;
+ iov[0].iov_len = total_len - 1;
+ iov[0].iov_base = (char *)req;
rqst.rq_iov = iov;
- rqst.rq_nvec = 2;
+ rqst.rq_nvec = 1;
rqst.rq_pages = wdata->pages;
rqst.rq_offset = wdata->page_offset;
rqst.rq_npages = wdata->nr_pages;
@@ -3153,7 +3347,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
rqst.rq_tailsz = wdata->tailsz;
#ifdef CONFIG_CIFS_SMB_DIRECT
if (wdata->mr) {
- iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+ iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
rqst.rq_npages = 0;
}
#endif
@@ -3210,6 +3404,7 @@ int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec)
{
+ struct smb_rqst rqst;
int rc = 0;
struct smb2_write_req *req = NULL;
struct smb2_write_rsp *rsp = NULL;
@@ -3251,7 +3446,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
- rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1,
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_vec + 1;
+
+ rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
&resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -3323,6 +3522,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
{
+ struct smb_rqst rqst;
struct smb2_query_directory_req *req;
struct smb2_query_directory_rsp *rsp = NULL;
struct kvec iov[2];
@@ -3395,7 +3595,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
- rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 2;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
@@ -3454,6 +3658,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
u8 info_type, u32 additional_info, unsigned int num,
void **data, unsigned int *size)
{
+ struct smb_rqst rqst;
struct smb2_set_info_req *req;
struct smb2_set_info_rsp *rsp = NULL;
struct kvec *iov;
@@ -3509,7 +3714,11 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
iov[i].iov_len = size[i];
}
- rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags,
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = num;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
@@ -3664,6 +3873,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
__u8 oplock_level)
{
+ struct smb_rqst rqst;
int rc;
struct smb2_oplock_break *req = NULL;
struct cifs_ses *ses = tcon->ses;
@@ -3692,7 +3902,11 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -3755,6 +3969,7 @@ int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
+ struct smb_rqst rqst;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
struct kvec rsp_iov;
@@ -3773,7 +3988,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = &iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3798,6 +4017,7 @@ int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int level)
{
+ struct smb_rqst rqst;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
struct kvec rsp_iov;
@@ -3829,7 +4049,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = &iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3868,6 +4092,7 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
const __u32 num_lock, struct smb2_lock_element *buf)
{
+ struct smb_rqst rqst;
int rc = 0;
struct smb2_lock_req *req = NULL;
struct kvec iov[2];
@@ -3900,7 +4125,12 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
iov[1].iov_len = count;
cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
- rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 2;
+
+ rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
&rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -3934,6 +4164,7 @@ int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
__u8 *lease_key, const __le32 lease_state)
{
+ struct smb_rqst rqst;
int rc;
struct smb2_lease_ack *req = NULL;
struct cifs_ses *ses = tcon->ses;
@@ -3964,7 +4195,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index a345560001ce..824dddeee3f2 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -851,8 +851,11 @@ struct validate_negotiate_info_rsp {
__le16 Dialect; /* Dialect in use for the connection */
} __packed;
-#define RSS_CAPABLE 0x00000001
-#define RDMA_CAPABLE 0x00000002
+#define RSS_CAPABLE cpu_to_le32(0x00000001)
+#define RDMA_CAPABLE cpu_to_le32(0x00000002)
+
+#define INTERNETWORK cpu_to_le16(0x0002)
+#define INTERNETWORKV6 cpu_to_le16(0x0017)
struct network_interface_info_ioctl_rsp {
__le32 Next; /* next interface. zero if this is last one */
@@ -860,7 +863,21 @@ struct network_interface_info_ioctl_rsp {
__le32 Capability; /* RSS or RDMA Capable */
__le32 Reserved;
__le64 LinkSpeed;
- char SockAddr_Storage[128];
+ __le16 Family;
+ __u8 Buffer[126];
+} __packed;
+
+struct iface_info_ipv4 {
+ __be16 Port;
+ __be32 IPv4Address;
+ __be64 Reserved;
+} __packed;
+
+struct iface_info_ipv6 {
+ __be16 Port;
+ __be32 FlowInfo;
+ __u8 IPv6Address[16];
+ __be32 ScopeId;
} __packed;
#define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index c84020057bd8..3ae208ac2a77 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -79,6 +79,10 @@ extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, bool set_alloc);
extern int smb2_set_file_info(struct inode *inode, const char *full_path,
FILE_BASIC_INFO *buf, const unsigned int xid);
+extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ umode_t mode, struct cifs_tcon *tcon,
+ const char *full_path,
+ struct cifs_sb_info *cifs_sb);
extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon,
const char *name, struct cifs_sb_info *cifs_sb);
extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
@@ -109,6 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
extern void smb2_reconnect_server(struct work_struct *work);
extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
+extern unsigned long
+smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker);
/*
* SMB2 Worker functions - most of protocol specific implementation details
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 349d5ccf854c..51b9437c3c7b 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -171,9 +171,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
unsigned char *sigptr = smb2_signature;
struct kvec *iov = rqst->rq_iov;
- int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
- struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+ struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -204,7 +202,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
return rc;
}
- rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
+ rc = __cifs_calc_signature(rqst, server, sigptr,
&server->secmech.sdeschmacsha256->shash);
if (!rc)
@@ -414,9 +412,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
unsigned char smb3_signature[SMB2_CMACAES_SIZE];
unsigned char *sigptr = smb3_signature;
struct kvec *iov = rqst->rq_iov;
- int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
- struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+ struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
ses = smb2_find_smb_ses(server, shdr->SessionId);
@@ -447,7 +443,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
return rc;
}
- rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
+ rc = __cifs_calc_signature(rqst, server, sigptr,
&server->secmech.sdesccmacaes->shash);
if (!rc)
@@ -462,7 +458,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
int rc = 0;
struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+ (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
if (!(shdr->Flags & SMB2_FLAGS_SIGNED) ||
server->tcpStatus == CifsNeedNegotiate)
@@ -635,7 +631,7 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
int rc;
struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+ (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
smb2_seq_num_into_buf(ses->server, shdr);
@@ -656,7 +652,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
int rc;
struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+ (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
smb2_seq_num_into_buf(server, shdr);
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index e459c97151b3..6fd94d9ffac2 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -18,6 +18,7 @@
#include "smbdirect.h"
#include "cifs_debug.h"
#include "cifsproto.h"
+#include "smb2proto.h"
static struct smbd_response *get_empty_queue_buffer(
struct smbd_connection *info);
@@ -2087,7 +2088,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
struct kvec vec;
int nvecs;
int size;
- unsigned int buflen = 0, remaining_data_length;
+ unsigned int buflen, remaining_data_length;
int start, i, j;
int max_iov_size =
info->max_send_size - sizeof(struct smbd_data_transfer);
@@ -2111,25 +2112,13 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
return -EINVAL;
}
- iov = &rqst->rq_iov[1];
-
- /* total up iov array first */
- for (i = 0; i < rqst->rq_nvec-1; i++) {
- buflen += iov[i].iov_len;
- }
/*
* Add in the page array if there is one. The caller needs to set
* rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
* ends at page boundary
*/
- if (rqst->rq_npages) {
- if (rqst->rq_npages == 1)
- buflen += rqst->rq_tailsz;
- else
- buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
- rqst->rq_offset + rqst->rq_tailsz;
- }
+ buflen = smb2_rqst_len(rqst, true);
if (buflen + sizeof(struct smbd_data_transfer) >
info->max_fragmented_send_size) {
@@ -2139,6 +2128,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
goto done;
}
+ iov = &rqst->rq_iov[1];
+
cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
for (i = 0; i < rqst->rq_nvec-1; i++)
dump_smb(iov[i].iov_base, iov[i].iov_len);
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index 61e74d455d90..67e413f6ee4d 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -378,7 +378,7 @@ DEFINE_EVENT(smb3_open_err_class, smb3_##name, \
TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
-
+DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
DECLARE_EVENT_CLASS(smb3_open_done_class,
TP_PROTO(unsigned int xid,
@@ -420,6 +420,7 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name, \
TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
+DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
#endif /* _CIFS_TRACE_H */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1f1a68f89110..fb57dfbfb749 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -201,15 +201,24 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
return 0;
}
-static unsigned long
-rqst_len(struct smb_rqst *rqst)
+unsigned long
+smb2_rqst_len(struct smb_rqst *rqst, bool skip_rfc1002_marker)
{
unsigned int i;
- struct kvec *iov = rqst->rq_iov;
+ struct kvec *iov;
+ int nvec;
unsigned long buflen = 0;
+ if (skip_rfc1002_marker && rqst->rq_iov[0].iov_len == 4) {
+ iov = &rqst->rq_iov[1];
+ nvec = rqst->rq_nvec - 1;
+ } else {
+ iov = rqst->rq_iov;
+ nvec = rqst->rq_nvec;
+ }
+
/* total up iov array first */
- for (i = 0; i < rqst->rq_nvec; i++)
+ for (i = 0; i < nvec; i++)
buflen += iov[i].iov_len;
/*
@@ -236,18 +245,20 @@ rqst_len(struct smb_rqst *rqst)
}
static int
-__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *rqst)
{
- int rc;
- struct kvec *iov = rqst->rq_iov;
- int n_vec = rqst->rq_nvec;
- unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
- unsigned long send_length;
- unsigned int i;
+ int rc = 0;
+ struct kvec *iov;
+ int n_vec;
+ unsigned int send_length = 0;
+ unsigned int i, j;
size_t total_len = 0, sent, size;
struct socket *ssocket = server->ssocket;
struct msghdr smb_msg;
int val = 1;
+ __be32 rfc1002_marker;
+
if (cifs_rdma_enabled(server) && server->smbd_conn) {
rc = smbd_send(server->smbd_conn, rqst);
goto smbd_done;
@@ -255,51 +266,67 @@ __smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
if (ssocket == NULL)
return -ENOTSOCK;
- /* sanity check send length */
- send_length = rqst_len(rqst);
- if (send_length != smb_buf_length + 4) {
- WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
- send_length, smb_buf_length);
- return -EIO;
- }
-
- if (n_vec < 2)
- return -EIO;
-
- cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
- dump_smb(iov[0].iov_base, iov[0].iov_len);
- dump_smb(iov[1].iov_base, iov[1].iov_len);
-
/* cork the socket */
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
- size = 0;
- for (i = 0; i < n_vec; i++)
- size += iov[i].iov_len;
+ for (j = 0; j < num_rqst; j++)
+ send_length += smb2_rqst_len(&rqst[j], true);
+ rfc1002_marker = cpu_to_be32(send_length);
- iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
+	/* Generate an rfc1002 marker for SMB2+ */
+ if (server->vals->header_preamble_size == 0) {
+ struct kvec hiov = {
+ .iov_base = &rfc1002_marker,
+ .iov_len = 4
+ };
+ iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
+ 1, 4);
+ rc = smb_send_kvec(server, &smb_msg, &sent);
+ if (rc < 0)
+ goto uncork;
- rc = smb_send_kvec(server, &smb_msg, &sent);
- if (rc < 0)
- goto uncork;
+ total_len += sent;
+ send_length += 4;
+ }
- total_len += sent;
+ cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
- /* now walk the page array and send each page in it */
- for (i = 0; i < rqst->rq_npages; i++) {
- struct bio_vec bvec;
+ for (j = 0; j < num_rqst; j++) {
+ iov = rqst[j].rq_iov;
+ n_vec = rqst[j].rq_nvec;
- bvec.bv_page = rqst->rq_pages[i];
- rqst_page_get_length(rqst, i, &bvec.bv_len, &bvec.bv_offset);
+ size = 0;
+ for (i = 0; i < n_vec; i++) {
+ dump_smb(iov[i].iov_base, iov[i].iov_len);
+ size += iov[i].iov_len;
+ }
+
+ iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
+ iov, n_vec, size);
- iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
- &bvec, 1, bvec.bv_len);
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
- break;
+ goto uncork;
total_len += sent;
+
+ /* now walk the page array and send each page in it */
+ for (i = 0; i < rqst[j].rq_npages; i++) {
+ struct bio_vec bvec;
+
+ bvec.bv_page = rqst[j].rq_pages[i];
+ rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
+ &bvec.bv_offset);
+
+ iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+ &bvec, 1, bvec.bv_len);
+ rc = smb_send_kvec(server, &smb_msg, &sent);
+ if (rc < 0)
+ break;
+
+ total_len += sent;
+ }
}
uncork:
@@ -308,9 +335,9 @@ uncork:
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
- if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
+ if ((total_len > 0) && (total_len != send_length)) {
cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
- smb_buf_length + 4, total_len);
+ send_length, total_len);
/*
* If we have only sent part of an SMB then the next SMB could
* be taken as the remainder of this one. We need to kill the
@@ -335,7 +362,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
int rc;
if (!(flags & CIFS_TRANSFORM_REQ))
- return __smb_send_rqst(server, rqst);
+ return __smb_send_rqst(server, 1, rqst);
if (!server->ops->init_transform_rq ||
!server->ops->free_transform_rq) {
@@ -347,7 +374,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
if (rc)
return rc;
- rc = __smb_send_rqst(server, &cur_rqst);
+ rc = __smb_send_rqst(server, 1, &cur_rqst);
server->ops->free_transform_rq(&cur_rqst);
return rc;
}
@@ -365,7 +392,7 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
iov[1].iov_base = (char *)smb_buffer + 4;
iov[1].iov_len = smb_buf_length;
- return __smb_send_rqst(server, &rqst);
+ return __smb_send_rqst(server, 1, &rqst);
}
static int
@@ -730,7 +757,6 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
* to the same server. We may make this configurable later or
* use ses->maxReq.
*/
-
rc = wait_for_free_request(ses->server, timeout, optype);
if (rc)
return rc;
@@ -766,8 +792,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
#ifdef CONFIG_CIFS_SMB311
if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
- smb311_update_preauth_hash(ses, rqst->rq_iov+1,
- rqst->rq_nvec-1);
+ smb311_update_preauth_hash(ses, rqst->rq_iov,
+ rqst->rq_nvec);
#endif
if (timeout == CIFS_ASYNC_OP)
@@ -812,8 +838,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
#ifdef CONFIG_CIFS_SMB311
if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
struct kvec iov = {
- .iov_base = buf,
- .iov_len = midQ->resp_buf_size
+ .iov_base = resp_iov->iov_base,
+ .iov_len = resp_iov->iov_len
};
smb311_update_preauth_hash(ses, &iov, 1);
}
@@ -872,49 +898,6 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
return rc;
}
-/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
-int
-smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
- struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
- const int flags, struct kvec *resp_iov)
-{
- struct smb_rqst rqst;
- struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
- int rc;
- int i;
- __u32 count;
- __be32 rfc1002_marker;
-
- if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
- new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
- GFP_KERNEL);
- if (!new_iov)
- return -ENOMEM;
- } else
- new_iov = s_iov;
-
- /* 1st iov is an RFC1002 Session Message length */
- memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
-
- count = 0;
- for (i = 1; i < n_vec + 1; i++)
- count += new_iov[i].iov_len;
-
- rfc1002_marker = cpu_to_be32(count);
-
- new_iov[0].iov_base = &rfc1002_marker;
- new_iov[0].iov_len = 4;
-
- memset(&rqst, 0, sizeof(struct smb_rqst));
- rqst.rq_iov = new_iov;
- rqst.rq_nvec = n_vec + 1;
-
- rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
- if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
- kfree(new_iov);
- return rc;
-}
-
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
diff --git a/fs/eventfd.c b/fs/eventfd.c
index ceb1031f1cac..08d3bd602f73 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -101,20 +101,14 @@ static int eventfd_release(struct inode *inode, struct file *file)
return 0;
}
-static struct wait_queue_head *
-eventfd_get_poll_head(struct file *file, __poll_t events)
-{
- struct eventfd_ctx *ctx = file->private_data;
-
- return &ctx->wqh;
-}
-
-static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
struct eventfd_ctx *ctx = file->private_data;
__poll_t events = 0;
u64 count;
+ poll_wait(file, &ctx->wqh, wait);
+
/*
* All writes to ctx->count occur within ctx->wqh.lock. This read
* can be done outside ctx->wqh.lock because we know that poll_wait
@@ -156,11 +150,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
count = READ_ONCE(ctx->count);
if (count > 0)
- events |= (EPOLLIN & eventmask);
+ events |= EPOLLIN;
if (count == ULLONG_MAX)
events |= EPOLLERR;
if (ULLONG_MAX - 1 > count)
- events |= (EPOLLOUT & eventmask);
+ events |= EPOLLOUT;
return events;
}
@@ -311,8 +305,7 @@ static const struct file_operations eventfd_fops = {
.show_fdinfo = eventfd_show_fdinfo,
#endif
.release = eventfd_release,
- .get_poll_head = eventfd_get_poll_head,
- .poll_mask = eventfd_poll_mask,
+ .poll = eventfd_poll,
.read = eventfd_read,
.write = eventfd_write,
.llseek = noop_llseek,
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ea4436f409fb..67db22fe99c5 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -922,18 +922,14 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
return 0;
}
-static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file,
- __poll_t eventmask)
-{
- struct eventpoll *ep = file->private_data;
- return &ep->poll_wait;
-}
-
-static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
struct eventpoll *ep = file->private_data;
int depth = 0;
+ /* Insert inside our poll wait queue */
+ poll_wait(file, &ep->poll_wait, wait);
+
/*
* Proceed to find out if wanted events are really available inside
* the ready list.
@@ -972,8 +968,7 @@ static const struct file_operations eventpoll_fops = {
.show_fdinfo = ep_show_fdinfo,
#endif
.release = ep_eventpoll_release,
- .get_poll_head = ep_eventpoll_get_poll_head,
- .poll_mask = ep_eventpoll_poll_mask,
+ .poll = ep_eventpoll_poll,
.llseek = noop_llseek,
};
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index cc40802ddfa8..00e759f05161 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -748,7 +748,6 @@ extern void ext2_free_blocks (struct inode *, unsigned long,
unsigned long);
extern unsigned long ext2_count_free_blocks (struct super_block *);
extern unsigned long ext2_count_dirs (struct super_block *);
-extern void ext2_check_blocks_bitmap (struct super_block *);
extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
unsigned int block_group,
struct buffer_head ** bh);
@@ -771,7 +770,6 @@ extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page
extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
extern void ext2_free_inode (struct inode *);
extern unsigned long ext2_count_free_inodes (struct super_block *);
-extern void ext2_check_inodes_bitmap (struct super_block *);
extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
/* inode.c */
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 25ab1274090f..8ff53f8da3bc 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -557,6 +557,9 @@ static int parse_options(char *options, struct super_block *sb,
set_opt (opts->s_mount_opt, NO_UID32);
break;
case Opt_nocheck:
+ ext2_msg(sb, KERN_WARNING,
+ "Option nocheck/check=none is deprecated and"
+ " will be removed in June 2020.");
clear_opt (opts->s_mount_opt, CHECK);
break;
case Opt_debug:
@@ -1335,9 +1338,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
new_opts.s_resgid = sbi->s_resgid;
spin_unlock(&sbi->s_lock);
- /*
- * Allow the "check" option to be passed as a remount option.
- */
if (!parse_options(data, sb, &new_opts))
return -EINVAL;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index c60f3d32ee91..a6797986b625 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if (size > PSIZE) {
/*
* To keep the rest of the code simple. Allocate a
- * contiguous buffer to work with
+ * contiguous buffer to work with. Make the buffer large
+ * enough to make use of the whole extent.
*/
- ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+ ea_buf->max_size = (size + sb->s_blocksize - 1) &
+ ~(sb->s_blocksize - 1);
+
+ ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
if (ea_buf->xattr == NULL)
return -ENOMEM;
ea_buf->flag = EA_MALLOC;
- ea_buf->max_size = (size + sb->s_blocksize - 1) &
- ~(sb->s_blocksize - 1);
if (ea_size == 0)
return 0;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index bbd0465535eb..f033f3a69a3b 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -883,8 +883,10 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
res = nfs_delegation_find_inode_server(server, fhandle);
- if (res != ERR_PTR(-ENOENT))
+ if (res != ERR_PTR(-ENOENT)) {
+ rcu_read_unlock();
return res;
+ }
}
rcu_read_unlock();
return ERR_PTR(-ENOENT);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index d4a07acad598..8f003792ccde 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1243,17 +1243,18 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx);
+ clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+ clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
switch (err) {
case -NFS4ERR_RESET_TO_PNFS:
if (ff_layout_choose_best_ds_for_read(hdr->lseg,
hdr->pgio_mirror_idx + 1,
&hdr->pgio_mirror_idx))
goto out_eagain;
- ff_layout_read_record_layoutstats_done(task, hdr);
- pnfs_read_resend_pnfs(hdr);
+ set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
return task->tk_status;
case -NFS4ERR_RESET_TO_MDS:
- ff_layout_reset_read(hdr);
+ set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
return task->tk_status;
case -EAGAIN:
goto out_eagain;
@@ -1403,6 +1404,10 @@ static void ff_layout_read_release(void *data)
struct nfs_pgio_header *hdr = data;
ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
+ if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+ pnfs_read_resend_pnfs(hdr);
+ else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+ ff_layout_reset_read(hdr);
pnfs_generic_rw_release(data);
}
@@ -1423,12 +1428,14 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx);
+ clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+ clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
switch (err) {
case -NFS4ERR_RESET_TO_PNFS:
- ff_layout_reset_write(hdr, true);
+ set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
return task->tk_status;
case -NFS4ERR_RESET_TO_MDS:
- ff_layout_reset_write(hdr, false);
+ set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
return task->tk_status;
case -EAGAIN:
return -EAGAIN;
@@ -1575,6 +1582,10 @@ static void ff_layout_write_release(void *data)
struct nfs_pgio_header *hdr = data;
ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
+ if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+ ff_layout_reset_write(hdr, true);
+ else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+ ff_layout_reset_write(hdr, false);
pnfs_generic_rw_release(data);
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ed45090e4df6..6dd146885da9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3294,6 +3294,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct inode *inode = calldata->inode;
+ struct pnfs_layout_hdr *lo;
bool is_rdonly, is_wronly, is_rdwr;
int call_close = 0;
@@ -3337,6 +3338,12 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
goto out_wait;
}
+ lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
+ if (lo && !pnfs_layout_is_valid(lo)) {
+ calldata->arg.lr_args = NULL;
+ calldata->res.lr_res = NULL;
+ }
+
if (calldata->arg.fmode == 0)
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
@@ -5972,12 +5979,19 @@ static void nfs4_delegreturn_release(void *calldata)
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
struct nfs4_delegreturndata *d_data;
+ struct pnfs_layout_hdr *lo;
d_data = (struct nfs4_delegreturndata *)data;
if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
return;
+ lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
+ if (lo && !pnfs_layout_is_valid(lo)) {
+ d_data->args.lr_args = NULL;
+ d_data->res.lr_res = NULL;
+ }
+
nfs4_setup_sequence(d_data->res.server->nfs_client,
&d_data->args.seq_args,
&d_data->res.seq_res,
@@ -8650,6 +8664,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
+ nfs4_sequence_free_slot(&lgp->res.seq_res);
+
switch (nfs4err) {
case 0:
goto out;
@@ -8714,7 +8730,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
goto out;
}
- nfs4_sequence_free_slot(&lgp->res.seq_res);
err = nfs4_handle_exception(server, nfs4err, exception);
if (!status) {
if (exception->retry)
@@ -8786,20 +8801,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
if (IS_ERR(task))
return ERR_CAST(task);
status = rpc_wait_for_completion_task(task);
- if (status == 0) {
+ if (status != 0)
+ goto out;
+
+ /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
+ if (task->tk_status < 0 || lgp->res.layoutp->len == 0) {
status = nfs4_layoutget_handle_exception(task, lgp, &exception);
*timeout = exception.timeout;
- }
-
+ } else
+ lseg = pnfs_layout_process(lgp);
+out:
trace_nfs4_layoutget(lgp->args.ctx,
&lgp->args.range,
&lgp->res.range,
&lgp->res.stateid,
status);
- /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
- if (status == 0 && lgp->res.layoutp->len)
- lseg = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
if (status)
@@ -8817,6 +8834,8 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
&lrp->args.seq_args,
&lrp->res.seq_res,
task);
+ if (!pnfs_layout_is_valid(lrp->args.layout))
+ rpc_exit(task, 0);
}
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index a8f5e6b16749..3fe81424337d 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -801,6 +801,11 @@ static inline void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
{
}
+static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo)
+{
+ return false;
+}
+
#endif /* CONFIG_NFS_V4_1 */
#if IS_ENABLED(CONFIG_NFS_V4_2)
diff --git a/fs/pipe.c b/fs/pipe.c
index bb0840e234f3..39d6f431da83 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -509,22 +509,19 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
-static struct wait_queue_head *
-pipe_get_poll_head(struct file *filp, __poll_t events)
-{
- struct pipe_inode_info *pipe = filp->private_data;
-
- return &pipe->wait;
-}
-
/* No kernel lock held - fine */
-static __poll_t pipe_poll_mask(struct file *filp, __poll_t events)
+static __poll_t
+pipe_poll(struct file *filp, poll_table *wait)
{
+ __poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
- int nrbufs = pipe->nrbufs;
- __poll_t mask = 0;
+ int nrbufs;
+
+ poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
+ nrbufs = pipe->nrbufs;
+ mask = 0;
if (filp->f_mode & FMODE_READ) {
mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
if (!pipe->writers && filp->f_version != pipe->w_counter)
@@ -1023,8 +1020,7 @@ const struct file_operations pipefifo_fops = {
.llseek = no_llseek,
.read_iter = pipe_read,
.write_iter = pipe_write,
- .get_poll_head = pipe_get_poll_head,
- .poll_mask = pipe_poll_mask,
+ .poll = pipe_poll,
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b6572944efc3..aaffc0c30216 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -235,6 +235,10 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
if (env_start != arg_end || env_start >= env_end)
env_start = env_end = arg_end;
+ /* .. and limit it to a maximum of one page of slop */
+ if (env_end >= arg_end + PAGE_SIZE)
+ env_end = arg_end + PAGE_SIZE - 1;
+
/* We're not going to care if "*ppos" has high bits set */
pos = arg_start + *ppos;
@@ -254,10 +258,19 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
while (count) {
int got;
size_t size = min_t(size_t, PAGE_SIZE, count);
+ long offset;
- got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
- if (got <= 0)
+ /*
+ * Are we already starting past the official end?
+ * We always include the last byte that is *supposed*
+ * to be NUL
+ */
+ offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
+
+ got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
+ if (got <= offset)
break;
+ got -= offset;
/* Don't walk past a NUL character once you hit arg_end */
if (pos + got >= arg_end) {
@@ -276,12 +289,17 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
n = arg_end - pos - 1;
/* Cut off at first NUL after 'n' */
- got = n + strnlen(page+n, got-n);
- if (!got)
+ got = n + strnlen(page+n, offset+got-n);
+ if (got < offset)
break;
+ got -= offset;
+
+ /* Include the NUL if it existed */
+ if (got < size)
+ got++;
}
- got -= copy_to_user(buf, page, got);
+ got -= copy_to_user(buf, page+offset, got);
if (unlikely(!got)) {
if (!len)
len = -EFAULT;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 6ac1c92997ea..bb1c1625b158 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -564,11 +564,20 @@ static int proc_seq_open(struct inode *inode, struct file *file)
return seq_open(file, de->seq_ops);
}
+static int proc_seq_release(struct inode *inode, struct file *file)
+{
+ struct proc_dir_entry *de = PDE(inode);
+
+ if (de->state_size)
+ return seq_release_private(inode, file);
+ return seq_release(inode, file);
+}
+
static const struct file_operations proc_seq_fops = {
.open = proc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = proc_seq_release,
};
struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d88231e3b2be..fc20e06c56ba 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -711,21 +711,18 @@ EXPORT_SYMBOL(dquot_quota_sync);
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct list_head *head;
struct dquot *dquot;
unsigned long freed = 0;
spin_lock(&dq_list_lock);
- head = free_dquots.prev;
- while (head != &free_dquots && sc->nr_to_scan) {
- dquot = list_entry(head, struct dquot, dq_free);
+ while (!list_empty(&free_dquots) && sc->nr_to_scan) {
+ dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
remove_dquot_hash(dquot);
remove_free_dquot(dquot);
remove_inuse(dquot);
do_destroy_dquot(dquot);
sc->nr_to_scan--;
freed++;
- head = free_dquots.prev;
}
spin_unlock(&dq_list_lock);
return freed;
diff --git a/fs/select.c b/fs/select.c
index 317891ff8165..4a6b6e4b21cb 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -34,29 +34,6 @@
#include <linux/uaccess.h>
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
-{
- if (file->f_op->poll) {
- return file->f_op->poll(file, pt);
- } else if (file_has_poll_mask(file)) {
- unsigned int events = poll_requested_events(pt);
- struct wait_queue_head *head;
-
- if (pt && pt->_qproc) {
- head = file->f_op->get_poll_head(file, events);
- if (!head)
- return DEFAULT_POLLMASK;
- if (IS_ERR(head))
- return EPOLLERR;
- pt->_qproc(file, head, pt);
- }
-
- return file->f_op->poll_mask(file, events);
- } else {
- return DEFAULT_POLLMASK;
- }
-}
-EXPORT_SYMBOL_GPL(vfs_poll);
/*
* Estimate expected accuracy in ns from a timeval.
diff --git a/fs/timerfd.c b/fs/timerfd.c
index d84a2bee4f82..cdad49da3ff7 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -226,20 +226,21 @@ static int timerfd_release(struct inode *inode, struct file *file)
kfree_rcu(ctx, rcu);
return 0;
}
-
-static struct wait_queue_head *timerfd_get_poll_head(struct file *file,
- __poll_t eventmask)
+
+static __poll_t timerfd_poll(struct file *file, poll_table *wait)
{
struct timerfd_ctx *ctx = file->private_data;
+ __poll_t events = 0;
+ unsigned long flags;
- return &ctx->wqh;
-}
+ poll_wait(file, &ctx->wqh, wait);
-static __poll_t timerfd_poll_mask(struct file *file, __poll_t eventmask)
-{
- struct timerfd_ctx *ctx = file->private_data;
+ spin_lock_irqsave(&ctx->wqh.lock, flags);
+ if (ctx->ticks)
+ events |= EPOLLIN;
+ spin_unlock_irqrestore(&ctx->wqh.lock, flags);
- return ctx->ticks ? EPOLLIN : 0;
+ return events;
}
static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
@@ -363,8 +364,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
static const struct file_operations timerfd_fops = {
.release = timerfd_release,
- .get_poll_head = timerfd_get_poll_head,
- .poll_mask = timerfd_poll_mask,
+ .poll = timerfd_poll,
.read = timerfd_read,
.llseek = noop_llseek,
.show_fdinfo = timerfd_show,
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1b961b1d9699..fcda0fc97b90 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -533,8 +533,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
udf_write_aext(table, &epos, &eloc,
(etype << 30) | elen, 1);
} else
- udf_delete_aext(table, epos, eloc,
- (etype << 30) | elen);
+ udf_delete_aext(table, epos);
} else {
alloc_count = 0;
}
@@ -630,7 +629,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
if (goal_elen)
udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
else
- udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
+ udf_delete_aext(table, goal_epos);
brelse(goal_epos.bh);
udf_add_free_space(sb, partition, -1);
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 0a98a2369738..d9523013096f 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -141,10 +141,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
fibh->ebh->b_data,
sizeof(struct fileIdentDesc) + fibh->soffset);
- fi_len = (sizeof(struct fileIdentDesc) +
- cfi->lengthFileIdent +
- le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
-
+ fi_len = udf_dir_entry_len(cfi);
*nf_pos += fi_len - (fibh->eoffset - fibh->soffset);
fibh->eoffset = fibh->soffset + fi_len;
} else {
@@ -152,6 +149,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
sizeof(struct fileIdentDesc));
}
}
+ /* Got last entry outside of dir size - fs is corrupted! */
+ if (*nf_pos > dir->i_size)
+ return NULL;
return fi;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7f39d17352c9..9915a58fbabd 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1147,8 +1147,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr
if (startnum > endnum) {
for (i = 0; i < (startnum - endnum); i++)
- udf_delete_aext(inode, *epos, laarr[i].extLocation,
- laarr[i].extLength);
+ udf_delete_aext(inode, *epos);
} else if (startnum < endnum) {
for (i = 0; i < (endnum - startnum); i++) {
udf_insert_aext(inode, *epos, laarr[i].extLocation,
@@ -2176,14 +2175,15 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
return (nelen >> 30);
}
-int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
- struct kernel_lb_addr eloc, uint32_t elen)
+int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
{
struct extent_position oepos;
int adsize;
int8_t etype;
struct allocExtDesc *aed;
struct udf_inode_info *iinfo;
+ struct kernel_lb_addr eloc;
+ uint32_t elen;
if (epos.bh) {
get_bh(epos.bh);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index c586026508db..06f37ddd2997 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -351,8 +351,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
int nfidlen;
- uint8_t lfi;
- uint16_t liu;
udf_pblk_t block;
struct kernel_lb_addr eloc;
uint32_t elen = 0;
@@ -383,7 +381,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
namelen = 0;
}
- nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
+ nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD);
f_pos = udf_ext0_offset(dir);
@@ -424,12 +422,8 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
goto out_err;
}
- liu = le16_to_cpu(cfi->lengthOfImpUse);
- lfi = cfi->lengthFileIdent;
-
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
- if (((sizeof(struct fileIdentDesc) +
- liu + lfi + 3) & ~3) == nfidlen) {
+ if (udf_dir_entry_len(cfi) == nfidlen) {
cfi->descTag.tagSerialNum = cpu_to_le16(1);
cfi->fileVersionNum = cpu_to_le16(1);
cfi->fileCharacteristics = 0;
@@ -1201,9 +1195,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
if (dir_fi) {
dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
- udf_update_tag((char *)dir_fi,
- (sizeof(struct fileIdentDesc) +
- le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
+ udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi));
if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
mark_inode_dirty(old_inode);
else
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index bae311b59400..84c47dde4d26 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -132,6 +132,12 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
struct fileIdentDesc *, struct udf_fileident_bh *,
uint8_t *, uint8_t *);
+static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
+{
+ return ALIGN(sizeof(struct fileIdentDesc) +
+ le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
+ UDF_NAME_PAD);
+}
/* file.c */
extern long udf_ioctl(struct file *, unsigned int, unsigned long);
@@ -167,8 +173,7 @@ extern int udf_add_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t, int);
extern void udf_write_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t, int);
-extern int8_t udf_delete_aext(struct inode *, struct extent_position,
- struct kernel_lb_addr, uint32_t);
+extern int8_t udf_delete_aext(struct inode *, struct extent_position);
extern int8_t udf_next_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t *, int);
extern int8_t udf_current_aext(struct inode *, struct extent_position *,
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index 84db76e0e3e3..fecd187fcf2c 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -157,6 +157,7 @@ __xfs_ag_resv_free(
error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
resv->ar_reserved = 0;
resv->ar_asked = 0;
+ resv->ar_orig_reserved = 0;
if (error)
trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
@@ -189,13 +190,34 @@ __xfs_ag_resv_init(
struct xfs_mount *mp = pag->pag_mount;
struct xfs_ag_resv *resv;
int error;
- xfs_extlen_t reserved;
+ xfs_extlen_t hidden_space;
if (used > ask)
ask = used;
- reserved = ask - used;
- error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+ switch (type) {
+ case XFS_AG_RESV_RMAPBT:
+ /*
+ * Space taken by the rmapbt is not subtracted from fdblocks
+ * because the rmapbt lives in the free space. Here we must
+ * subtract the entire reservation from fdblocks so that we
+ * always have blocks available for rmapbt expansion.
+ */
+ hidden_space = ask;
+ break;
+ case XFS_AG_RESV_METADATA:
+ /*
+		 * Space taken by all other metadata btrees is accounted
+ * on-disk as used space. We therefore only hide the space
+ * that is reserved but not used by the trees.
+ */
+ hidden_space = ask - used;
+ break;
+ default:
+ ASSERT(0);
+ return -EINVAL;
+ }
+ error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
if (error) {
trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
error, _RET_IP_);
@@ -216,7 +238,8 @@ __xfs_ag_resv_init(
resv = xfs_perag_resv(pag, type);
resv->ar_asked = ask;
- resv->ar_reserved = resv->ar_orig_reserved = reserved;
+ resv->ar_orig_reserved = hidden_space;
+ resv->ar_reserved = ask - used;
trace_xfs_ag_resv_init(pag, type, ask);
return 0;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 01628f0c9a0c..7205268b30bc 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -5780,6 +5780,32 @@ del_cursor:
return error;
}
+/* Make sure we won't be right-shifting an extent past the maximum bound. */
+int
+xfs_bmap_can_insert_extents(
+ struct xfs_inode *ip,
+ xfs_fileoff_t off,
+ xfs_fileoff_t shift)
+{
+ struct xfs_bmbt_irec got;
+ int is_empty;
+ int error = 0;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ return -EIO;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
+ if (!error && !is_empty && got.br_startoff >= off &&
+ ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
+ error = -EINVAL;
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+ return error;
+}
+
int
xfs_bmap_insert_extents(
struct xfs_trans *tp,
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 99dddbd0fcc6..9b49ddf99c41 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -227,6 +227,8 @@ int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
bool *done, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops);
+int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
+ xfs_fileoff_t shift);
int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 1c5a8aaf2bfc..059bc44c27e8 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -962,6 +962,9 @@ typedef enum xfs_dinode_fmt {
XFS_DFORK_DSIZE(dip, mp) : \
XFS_DFORK_ASIZE(dip, mp))
+#define XFS_DFORK_MAXEXT(dip, mp, w) \
+ (XFS_DFORK_SIZE(dip, mp, w) / sizeof(struct xfs_bmbt_rec))
+
/*
* Return pointers to the data or attribute forks.
*/
@@ -1526,6 +1529,8 @@ typedef struct xfs_bmdr_block {
#define BMBT_STARTBLOCK_BITLEN 52
#define BMBT_BLOCKCOUNT_BITLEN 21
+#define BMBT_STARTOFF_MASK ((1ULL << BMBT_STARTOFF_BITLEN) - 1)
+
typedef struct xfs_bmbt_rec {
__be64 l0, l1;
} xfs_bmbt_rec_t;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index d38d724534c4..33dc34655ac3 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -374,6 +374,47 @@ xfs_log_dinode_to_disk(
}
}
+static xfs_failaddr_t
+xfs_dinode_verify_fork(
+ struct xfs_dinode *dip,
+ struct xfs_mount *mp,
+ int whichfork)
+{
+ uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
+
+ switch (XFS_DFORK_FORMAT(dip, whichfork)) {
+ case XFS_DINODE_FMT_LOCAL:
+ /*
+ * no local regular files yet
+ */
+ if (whichfork == XFS_DATA_FORK) {
+ if (S_ISREG(be16_to_cpu(dip->di_mode)))
+ return __this_address;
+ if (be64_to_cpu(dip->di_size) >
+ XFS_DFORK_SIZE(dip, mp, whichfork))
+ return __this_address;
+ }
+ if (di_nextents)
+ return __this_address;
+ break;
+ case XFS_DINODE_FMT_EXTENTS:
+ if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
+ return __this_address;
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ if (whichfork == XFS_ATTR_FORK) {
+ if (di_nextents > MAXAEXTNUM)
+ return __this_address;
+ } else if (di_nextents > MAXEXTNUM) {
+ return __this_address;
+ }
+ break;
+ default:
+ return __this_address;
+ }
+ return NULL;
+}
+
xfs_failaddr_t
xfs_dinode_verify(
struct xfs_mount *mp,
@@ -441,24 +482,9 @@ xfs_dinode_verify(
case S_IFREG:
case S_IFLNK:
case S_IFDIR:
- switch (dip->di_format) {
- case XFS_DINODE_FMT_LOCAL:
- /*
- * no local regular files yet
- */
- if (S_ISREG(mode))
- return __this_address;
- if (di_size > XFS_DFORK_DSIZE(dip, mp))
- return __this_address;
- if (dip->di_nextents)
- return __this_address;
- /* fall through */
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- break;
- default:
- return __this_address;
- }
+ fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
+ if (fa)
+ return fa;
break;
case 0:
/* Uninitialized inode ok. */
@@ -468,17 +494,9 @@ xfs_dinode_verify(
}
if (XFS_DFORK_Q(dip)) {
- switch (dip->di_aformat) {
- case XFS_DINODE_FMT_LOCAL:
- if (dip->di_anextents)
- return __this_address;
- /* fall through */
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- break;
- default:
- return __this_address;
- }
+ fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
+ if (fa)
+ return fa;
} else {
/*
* If there is no fork offset, this may be a freshly-made inode
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 65fc4ed2e9a1..b228c821bae6 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1029,8 +1029,8 @@ xfs_rtalloc_query_range(
if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
low_rec->ar_startext == high_rec->ar_startext)
return 0;
- if (high_rec->ar_startext >= mp->m_sb.sb_rextents)
- high_rec->ar_startext = mp->m_sb.sb_rextents - 1;
+ if (high_rec->ar_startext > mp->m_sb.sb_rextents)
+ high_rec->ar_startext = mp->m_sb.sb_rextents;
/* Iterate the bitmap, looking for discrepancies. */
rtstart = low_rec->ar_startext;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index c35009a86699..83b1e8c6c18f 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -685,12 +685,10 @@ out_unlock_iolock:
}
/*
- * dead simple method of punching delalyed allocation blocks from a range in
- * the inode. Walks a block at a time so will be slow, but is only executed in
- * rare error cases so the overhead is not critical. This will always punch out
- * both the start and end blocks, even if the ranges only partially overlap
- * them, so it is up to the caller to ensure that partial blocks are not
- * passed in.
+ * Dead simple method of punching delayed allocation blocks from a range in
+ * the inode. This will always punch out both the start and end blocks, even
+ * if the ranges only partially overlap them, so it is up to the caller to
+ * ensure that partial blocks are not passed in.
*/
int
xfs_bmap_punch_delalloc_range(
@@ -698,63 +696,44 @@ xfs_bmap_punch_delalloc_range(
xfs_fileoff_t start_fsb,
xfs_fileoff_t length)
{
- xfs_fileoff_t remaining = length;
+ struct xfs_ifork *ifp = &ip->i_df;
+ xfs_fileoff_t end_fsb = start_fsb + length;
+ struct xfs_bmbt_irec got, del;
+ struct xfs_iext_cursor icur;
int error = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- do {
- int done;
- xfs_bmbt_irec_t imap;
- int nimaps = 1;
- xfs_fsblock_t firstblock;
- struct xfs_defer_ops dfops;
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+ if (error)
+ return error;
+ }
- /*
- * Map the range first and check that it is a delalloc extent
- * before trying to unmap the range. Otherwise we will be
- * trying to remove a real extent (which requires a
- * transaction) or a hole, which is probably a bad idea...
- */
- error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
- XFS_BMAPI_ENTIRE);
+ if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
+ return 0;
- if (error) {
- /* something screwed, just bail */
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_alert(ip->i_mount,
- "Failed delalloc mapping lookup ino %lld fsb %lld.",
- ip->i_ino, start_fsb);
- }
- break;
- }
- if (!nimaps) {
- /* nothing there */
- goto next_block;
- }
- if (imap.br_startblock != DELAYSTARTBLOCK) {
- /* been converted, ignore */
- goto next_block;
- }
- WARN_ON(imap.br_blockcount == 0);
+ while (got.br_startoff + got.br_blockcount > start_fsb) {
+ del = got;
+ xfs_trim_extent(&del, start_fsb, length);
/*
- * Note: while we initialise the firstblock/dfops pair, they
- * should never be used because blocks should never be
- * allocated or freed for a delalloc extent and hence we need
- * don't cancel or finish them after the xfs_bunmapi() call.
+ * A delete can push the cursor forward. Step back to the
+ * previous extent on non-delalloc or extents outside the
+ * target range.
*/
- xfs_defer_init(&dfops, &firstblock);
- error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
- &dfops, &done);
- if (error)
- break;
+ if (!del.br_blockcount ||
+ !isnullstartblock(del.br_startblock)) {
+ if (!xfs_iext_prev_extent(ifp, &icur, &got))
+ break;
+ continue;
+ }
- ASSERT(!xfs_defer_has_unfinished_work(&dfops));
-next_block:
- start_fsb++;
- remaining--;
- } while(remaining > 0);
+ error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
+ &got, &del);
+ if (error || !xfs_iext_get_extent(ifp, &icur, &got))
+ break;
+ }
return error;
}
@@ -1208,7 +1187,22 @@ xfs_free_file_space(
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
- return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+ error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+ if (error)
+ return error;
+
+ /*
+ * If we zeroed right up to EOF and EOF straddles a page boundary we
+ * must make sure that the post-EOF area is also zeroed because the
+ * page could be mmap'd and iomap_zero_range doesn't do that for us.
+ * Writeback of the eof page will do this, albeit clumsily.
+ */
+ if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+ error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+ (offset + len) & ~PAGE_MASK, LLONG_MAX);
+ }
+
+ return error;
}
/*
@@ -1404,6 +1398,10 @@ xfs_insert_file_space(
trace_xfs_insert_file_space(ip);
+ error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
+ if (error)
+ return error;
+
error = xfs_prepare_shift(ip, offset);
if (error)
return error;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index c34fa9c342f2..c7157bc48bd1 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -513,8 +513,8 @@ xfs_getfsmap_rtdev_rtbitmap_query(
struct xfs_trans *tp,
struct xfs_getfsmap_info *info)
{
- struct xfs_rtalloc_rec alow;
- struct xfs_rtalloc_rec ahigh;
+ struct xfs_rtalloc_rec alow = { 0 };
+ struct xfs_rtalloc_rec ahigh = { 0 };
int error;
xfs_ilock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a7afcad6b711..3f2bd6032cf8 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -387,7 +387,7 @@ xfs_reserve_blocks(
do {
free = percpu_counter_sum(&mp->m_fdblocks) -
mp->m_alloc_set_aside;
- if (!free)
+ if (free <= 0)
break;
delta = request - mp->m_resblks;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 7a96c4e0ab5c..5df4de666cc1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3236,7 +3236,6 @@ xfs_iflush_cluster(
struct xfs_inode *cip;
int nr_found;
int clcount = 0;
- int bufwasdelwri;
int i;
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -3360,37 +3359,22 @@ cluster_corrupt_out:
* inode buffer and shut down the filesystem.
*/
rcu_read_unlock();
- /*
- * Clean up the buffer. If it was delwri, just release it --
- * brelse can handle it with no problems. If not, shut down the
- * filesystem before releasing the buffer.
- */
- bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
- if (bufwasdelwri)
- xfs_buf_relse(bp);
-
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- if (!bufwasdelwri) {
- /*
- * Just like incore_relse: if we have b_iodone functions,
- * mark the buffer as an error and call them. Otherwise
- * mark it as stale and brelse.
- */
- if (bp->b_iodone) {
- bp->b_flags &= ~XBF_DONE;
- xfs_buf_stale(bp);
- xfs_buf_ioerror(bp, -EIO);
- xfs_buf_ioend(bp);
- } else {
- xfs_buf_stale(bp);
- xfs_buf_relse(bp);
- }
- }
-
/*
- * Unlocks the flush lock
+	 * We'll always have an inode attached to the buffer for completion
+	 * processing by the time we are called from xfs_iflush(). Hence we
+	 * always need to do IO completion processing to abort the inodes
+	 * attached to the buffer. Handle them just like the shutdown case in
+ * xfs_buf_submit().
*/
+ ASSERT(bp->b_iodone);
+ bp->b_flags &= ~XBF_DONE;
+ xfs_buf_stale(bp);
+ xfs_buf_ioerror(bp, -EIO);
+ xfs_buf_ioend(bp);
+
+ /* abort the corrupt inode, as it was not attached to the buffer */
xfs_iflush_abort(cip, false);
kmem_free(cilist);
xfs_perag_put(pag);
@@ -3486,12 +3470,17 @@ xfs_iflush(
xfs_log_force(mp, 0);
/*
- * inode clustering:
- * see if other inodes can be gathered into this write
+ * inode clustering: try to gather other inodes into this write
+ *
+ * Note: Any error during clustering will result in the filesystem
+ * being shut down and completion callbacks run on the cluster buffer.
+ * As we have already flushed and attached this inode to the buffer,
+ * it has already been aborted and released by xfs_iflush_cluster() and
+ * so we have no further error handling to do here.
*/
error = xfs_iflush_cluster(ip, bp);
if (error)
- goto cluster_corrupt_out;
+ return error;
*bpp = bp;
return 0;
@@ -3500,12 +3489,8 @@ corrupt_out:
if (bp)
xfs_buf_relse(bp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-cluster_corrupt_out:
- error = -EFSCORRUPTED;
abort_out:
- /*
- * Unlocks the flush lock
- */
+ /* abort the corrupt inode, as it was not attached to the buffer */
xfs_iflush_abort(ip, false);
return error;
}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 49f5492eed3b..55876dd02f0c 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -963,12 +963,13 @@ xfs_ilock_for_iomap(
unsigned *lockmode)
{
unsigned mode = XFS_ILOCK_SHARED;
+ bool is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
/*
* COW writes may allocate delalloc space or convert unwritten COW
* extents, so we need to make sure to take the lock exclusively here.
*/
- if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) {
+ if (xfs_is_reflink_inode(ip) && is_write) {
/*
* FIXME: It could still overwrite on unshared extents and not
* need allocation.
@@ -989,6 +990,7 @@ xfs_ilock_for_iomap(
mode = XFS_ILOCK_EXCL;
}
+relock:
if (flags & IOMAP_NOWAIT) {
if (!xfs_ilock_nowait(ip, mode))
return -EAGAIN;
@@ -996,6 +998,17 @@ xfs_ilock_for_iomap(
xfs_ilock(ip, mode);
}
+ /*
+ * The reflink iflag could have changed since the earlier unlocked
+	 * check, so if we got ILOCK_SHARED for a write but we're now a
+ * reflink inode we have to switch to ILOCK_EXCL and relock.
+ */
+ if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
+ xfs_iunlock(ip, mode);
+ mode = XFS_ILOCK_EXCL;
+ goto relock;
+ }
+
*lockmode = mode;
return 0;
}
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index e040af120b69..524f543c5b82 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -258,7 +258,12 @@ xfs_trans_alloc(
if (!(flags & XFS_TRANS_NO_WRITECOUNT))
sb_start_intwrite(mp->m_super);
- WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+ /*
+ * Zero-reservation ("empty") transactions can't modify anything, so
+ * they're allowed to run while we're frozen.
+ */
+ WARN_ON(resp->tr_logres > 0 &&
+ mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
atomic_inc(&mp->m_active_trans);
tp = kmem_zone_zalloc(xfs_trans_zone,