Diffstat (limited to 'fs')
-rw-r--r-- | fs/9p/xattr.c | 8
-rw-r--r-- | fs/btrfs/backref.c | 85
-rw-r--r-- | fs/btrfs/disk-io.c | 14
-rw-r--r-- | fs/btrfs/ioctl.c | 2
-rw-r--r-- | fs/btrfs/qgroup.c | 11
-rw-r--r-- | fs/btrfs/super.c | 4
-rw-r--r-- | fs/btrfs/transaction.c | 15
-rw-r--r-- | fs/btrfs/volumes.c | 20
-rw-r--r-- | fs/cifs/cifsfs.h | 5
-rw-r--r-- | fs/cifs/cifssmb.c | 30
-rw-r--r-- | fs/cifs/fs_context.c | 13
-rw-r--r-- | fs/cifs/fs_context.h | 3
-rw-r--r-- | fs/cifs/misc.c | 2
-rw-r--r-- | fs/cifs/smb2pdu.c | 42
-rw-r--r-- | fs/ksmbd/connection.c | 17
-rw-r--r-- | fs/ksmbd/ksmbd_work.h | 2
-rw-r--r-- | fs/ksmbd/server.c | 5
-rw-r--r-- | fs/ksmbd/smb2pdu.c | 59
-rw-r--r-- | fs/ksmbd/smb2pdu.h | 1
-rw-r--r-- | fs/ksmbd/smb_common.c | 138
-rw-r--r-- | fs/ksmbd/smb_common.h | 2
-rw-r--r-- | fs/ksmbd/unicode.c | 18
-rw-r--r-- | fs/namespace.c | 2
-rw-r--r-- | fs/netfs/iterator.c | 2
-rw-r--r-- | fs/nfs/Kconfig | 1
-rw-r--r-- | fs/nfs/nfs4proc.c | 5
-rw-r--r-- | fs/nfsd/blocklayout.c | 1
-rw-r--r-- | fs/nfsd/nfs4callback.c | 4
-rw-r--r-- | fs/nfsd/nfs4xdr.c | 15
-rw-r--r-- | fs/zonefs/file.c | 28
30 files changed, 391 insertions, 163 deletions
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index 50f7f3f6b55e..1974a38bce20 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -35,10 +35,12 @@ ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name,
 		return retval;
 	}
 	if (attr_size > buffer_size) {
-		if (!buffer_size) /* request to get the attr_size */
-			retval = attr_size;
-		else
+		if (buffer_size)
 			retval = -ERANGE;
+		else if (attr_size > SSIZE_MAX)
+			retval = -EOVERFLOW;
+		else /* request to get the attr_size */
+			retval = attr_size;
 	} else {
 		iov_iter_truncate(&to, attr_size);
 		retval = p9_client_read(attr_fid, 0, &to, &err);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 90e40d5ceccd..e54f0884802a 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1921,8 +1921,7 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
 	level = -1;
 	ULIST_ITER_INIT(&uiter);
 	while (1) {
-		bool is_shared;
-		bool cached;
+		const unsigned long prev_ref_count = ctx->refs.nnodes;

 		walk_ctx.bytenr = bytenr;
 		ret = find_parent_nodes(&walk_ctx, &shared);
@@ -1940,21 +1939,36 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
 		ret = 0;

 		/*
-		 * If our data extent was not directly shared (without multiple
-		 * reference items), than it might have a single reference item
-		 * with a count > 1 for the same offset, which means there are 2
-		 * (or more) file extent items that point to the data extent -
-		 * this happens when a file extent item needs to be split and
-		 * then one item gets moved to another leaf due to a b+tree leaf
-		 * split when inserting some item. In this case the file extent
-		 * items may be located in different leaves and therefore some
-		 * of the leaves may be referenced through shared subtrees while
-		 * others are not. Since our extent buffer cache only works for
-		 * a single path (by far the most common case and simpler to
-		 * deal with), we can not use it if we have multiple leaves
-		 * (which implies multiple paths).
+		 * More than one extent buffer (bytenr) may have been added to
+		 * the ctx->refs ulist, in which case we have to check multiple
+		 * tree paths in case the first one is not shared, so we can not
+		 * use the path cache which is made for a single path. Multiple
+		 * extent buffers at the current level happen when:
+		 *
+		 * 1) level -1, the data extent: If our data extent was not
+		 *    directly shared (without multiple reference items), then
+		 *    it might have a single reference item with a count > 1 for
+		 *    the same offset, which means there are 2 (or more) file
+		 *    extent items that point to the data extent - this happens
+		 *    when a file extent item needs to be split and then one
+		 *    item gets moved to another leaf due to a b+tree leaf split
+		 *    when inserting some item. In this case the file extent
+		 *    items may be located in different leaves and therefore
+		 *    some of the leaves may be referenced through shared
+		 *    subtrees while others are not. Since our extent buffer
+		 *    cache only works for a single path (by far the most common
+		 *    case and simpler to deal with), we can not use it if we
+		 *    have multiple leaves (which implies multiple paths).
+		 *
+		 * 2) level >= 0, a tree node/leaf: We can have a mix of direct
+		 *    and indirect references on a b+tree node/leaf, so we have
+		 *    to check multiple paths, and the extent buffer (the
+		 *    current bytenr) may be shared or not. One example is
+		 *    during relocation as we may get a shared tree block ref
+		 *    (direct ref) and a non-shared tree block ref (indirect
+		 *    ref) for the same node/leaf.
 		 */
-		if (level == -1 && ctx->refs.nnodes > 1)
+		if ((ctx->refs.nnodes - prev_ref_count) > 1)
 			ctx->use_path_cache = false;

 		if (level >= 0)
@@ -1964,12 +1978,17 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
 		if (!node)
 			break;
 		bytenr = node->val;
-		level++;
-		cached = lookup_backref_shared_cache(ctx, root, bytenr, level,
-						     &is_shared);
-		if (cached) {
-			ret = (is_shared ? 1 : 0);
-			break;
+		if (ctx->use_path_cache) {
+			bool is_shared;
+			bool cached;
+
+			level++;
+			cached = lookup_backref_shared_cache(ctx, root, bytenr,
+							     level, &is_shared);
+			if (cached) {
+				ret = (is_shared ? 1 : 0);
+				break;
+			}
 		}
 		shared.share_count = 0;
 		shared.have_delayed_delete_refs = false;
@@ -1977,6 +1996,28 @@ int btrfs_is_data_extent_shared(struct btrfs_inode *inode, u64 bytenr,
 	}

 	/*
+	 * If the path cache is disabled, then it means at some tree level we
+	 * got multiple parents due to a mix of direct and indirect backrefs or
+	 * multiple leaves with file extent items pointing to the same data
+	 * extent. We have to invalidate the cache and cache only the sharedness
+	 * result for the levels where we got only one node/reference.
+	 */
+	if (!ctx->use_path_cache) {
+		int i = 0;
+
+		level--;
+		if (ret >= 0 && level >= 0) {
+			bytenr = ctx->path_cache_entries[level].bytenr;
+			ctx->use_path_cache = true;
+			store_backref_shared_cache(ctx, root, bytenr, level, ret);
+			i = level + 1;
+		}
+
+		for ( ; i < BTRFS_MAX_LEVEL; i++)
+			ctx->path_cache_entries[i].bytenr = 0;
+	}
+
+	/*
 	 * Cache the sharedness result for the data extent if we know our inode
 	 * has more than 1 file extent item that refers to the data extent.
 	 */
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index b53f0e30ce2b..9e1596bb208d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2250,6 +2250,20 @@ static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type)

 	fs_info->csum_shash = csum_shash;

+	/*
+	 * Check if the checksum implementation is a fast accelerated one.
+	 * As-is this is a bit of a hack and should be replaced once the csum
+	 * implementations provide that information themselves.
+	 */
+	switch (csum_type) {
+	case BTRFS_CSUM_TYPE_CRC32:
+		if (!strstr(crypto_shash_driver_name(csum_shash), "generic"))
+			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
+		break;
+	default:
+		break;
+	}
+
 	btrfs_info(fs_info, "using %s (%s) checksum algorithm",
 			btrfs_super_csum_name(csum_type),
 			crypto_shash_driver_name(csum_shash));
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a0ef1a1784c7..ba769a1eb87a 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3732,7 +3732,9 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg)
 	}

 	/* update qgroup status and info */
+	mutex_lock(&fs_info->qgroup_ioctl_lock);
 	err = btrfs_run_qgroups(trans);
+	mutex_unlock(&fs_info->qgroup_ioctl_lock);
 	if (err < 0)
 		btrfs_handle_fs_error(fs_info, err,
 				      "failed to update qgroup status and info");
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 52a7d2fa2284..f41da7ac360d 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2828,13 +2828,22 @@ cleanup:
 }

 /*
- * called from commit_transaction. Writes all changed qgroups to disk.
+ * Writes all changed qgroups to disk.
+ * Called by the transaction commit path and the qgroup assign ioctl.
  */
 int btrfs_run_qgroups(struct btrfs_trans_handle *trans)
 {
 	struct btrfs_fs_info *fs_info = trans->fs_info;
 	int ret = 0;

+	/*
+	 * In case we are called from the qgroup assign ioctl, assert that we
+	 * are holding the qgroup_ioctl_lock, otherwise we can race with a quota
+	 * disable operation (ioctl) and access a freed quota root.
+	 */
+	if (trans->transaction->state != TRANS_STATE_COMMIT_DOING)
+		lockdep_assert_held(&fs_info->qgroup_ioctl_lock);
+
 	if (!fs_info->quota_root)
 		return ret;

diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 581845bc206a..366fb4cde145 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1516,8 +1516,6 @@ static struct dentry *btrfs_mount_root(struct file_system_type *fs_type,
 		shrinker_debugfs_rename(&s->s_shrink, "sb-%s:%s", fs_type->name,
 					s->s_id);
 		btrfs_sb(s)->bdev_holder = fs_type;
-		if (!strstr(crc32c_impl(), "generic"))
-			set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags);
 		error = btrfs_fill_super(s, fs_devices, data);
 	}
 	if (!error)
@@ -1631,6 +1629,8 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
 	btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
+	workqueue_set_max_active(fs_info->endio_workers, new_pool_size);
+	workqueue_set_max_active(fs_info->endio_meta_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_write_workers, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->endio_freespace_worker, new_pool_size);
 	btrfs_workqueue_set_max(fs_info->delayed_workers, new_pool_size);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 18329ebcb1cb..b8d5b1fa9a03 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2035,7 +2035,20 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err)

 	if (current->journal_info == trans)
 		current->journal_info = NULL;
-	btrfs_scrub_cancel(fs_info);
+
+	/*
+	 * If relocation is running, we can't cancel scrub because that will
+	 * result in a deadlock. Before relocating a block group, relocation
+	 * pauses scrub, then starts and commits a transaction before unpausing
+	 * scrub. If the transaction commit is being done by the relocation
+	 * task or triggered by another task and the relocation task is waiting
+	 * for the commit, and we end up here due to an error in the commit
+	 * path, then calling btrfs_scrub_cancel() will deadlock, as we are
+	 * asking for scrub to stop while having it asked to be paused higher
+	 * above in relocation code.
+	 */
+	if (!test_bit(BTRFS_FS_RELOC_RUNNING, &fs_info->flags))
+		btrfs_scrub_cancel(fs_info);

 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6d0124b6e79e..c6d592870400 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1366,8 +1366,17 @@ struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
 	 * So, we need to add a special mount option to scan for
 	 * later supers, using BTRFS_SUPER_MIRROR_MAX instead
 	 */
-	flags |= FMODE_EXCL;

+	/*
+	 * Avoid using flag |= FMODE_EXCL here, as the systemd-udev may
+	 * initiate the device scan which may race with the user's mount
+	 * or mkfs command, resulting in failure.
+	 * Since the device scan is solely for reading purposes, there is
+	 * no need for FMODE_EXCL. Additionally, the devices are read again
+	 * during the mount process. It is ok to get some inconsistent
+	 * values temporarily, as the device paths of the fsid are the only
+	 * required information for assembling the volume.
+	 */
 	bdev = blkdev_get_by_path(path, flags, holder);
 	if (IS_ERR(bdev))
 		return ERR_CAST(bdev);
@@ -3266,8 +3275,15 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
 	btrfs_scrub_pause(fs_info);
 	ret = btrfs_relocate_block_group(fs_info, chunk_offset);
 	btrfs_scrub_continue(fs_info);
-	if (ret)
+	if (ret) {
+		/*
+		 * If we had a transaction abort, stop all running scrubs.
+		 * See transaction.c:cleanup_transaction() why we do it here.
+		 */
+		if (BTRFS_FS_ERROR(fs_info))
+			btrfs_scrub_cancel(fs_info);
 		return ret;
+	}

 	block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
 	if (!block_group)
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 71fe0a0a7992..415176b2cf32 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -124,7 +124,10 @@ extern const struct dentry_operations cifs_ci_dentry_ops;
 #ifdef CONFIG_CIFS_DFS_UPCALL
 extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
 #else
-#define cifs_dfs_d_automount NULL
+static inline struct vfsmount *cifs_dfs_d_automount(struct path *path)
+{
+	return ERR_PTR(-EREMOTE);
+}
 #endif

 /* Functions related to symlinks */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 38a697eca305..9d963caec35c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -71,7 +71,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
 	int rc;
 	struct cifs_ses *ses;
 	struct TCP_Server_Info *server;
-	struct nls_table *nls_codepage;
+	struct nls_table *nls_codepage = NULL;

 	/*
 	 * SMBs NegProt, SessSetup, uLogoff do not have tcon yet so check for
@@ -99,6 +99,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
 	}
 	spin_unlock(&tcon->tc_lock);

+again:
 	rc = cifs_wait_for_server_reconnect(server, tcon->retry);
 	if (rc)
 		return rc;
@@ -110,8 +111,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
 	}
 	spin_unlock(&ses->chan_lock);

-	nls_codepage = load_nls_default();
-
+	mutex_lock(&ses->session_mutex);
 	/*
 	 * Recheck after acquire mutex. If another thread is negotiating
 	 * and the server never sends an answer the socket will be closed
@@ -120,29 +120,38 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
 	spin_lock(&server->srv_lock);
 	if (server->tcpStatus == CifsNeedReconnect) {
 		spin_unlock(&server->srv_lock);
+		mutex_unlock(&ses->session_mutex);
+
+		if (tcon->retry)
+			goto again;
 		rc = -EHOSTDOWN;
 		goto out;
 	}
 	spin_unlock(&server->srv_lock);

+	nls_codepage = load_nls_default();
+
 	/*
 	 * need to prevent multiple threads trying to simultaneously
 	 * reconnect the same SMB session
 	 */
+	spin_lock(&ses->ses_lock);
 	spin_lock(&ses->chan_lock);
-	if (!cifs_chan_needs_reconnect(ses, server)) {
+	if (!cifs_chan_needs_reconnect(ses, server) &&
+	    ses->ses_status == SES_GOOD) {
 		spin_unlock(&ses->chan_lock);
+		spin_unlock(&ses->ses_lock);

 		/* this means that we only need to tree connect */
 		if (tcon->need_reconnect)
 			goto skip_sess_setup;

-		rc = -EHOSTDOWN;
+		mutex_unlock(&ses->session_mutex);
 		goto out;
 	}
 	spin_unlock(&ses->chan_lock);
+	spin_unlock(&ses->ses_lock);

-	mutex_lock(&ses->session_mutex);
 	rc = cifs_negotiate_protocol(0, ses, server);
 	if (!rc)
 		rc = cifs_setup_session(0, ses, server, nls_codepage);
@@ -4373,8 +4382,13 @@ CIFSGetDFSRefer(const unsigned int xid, struct cifs_ses *ses,
 		return -ENODEV;

getDFSRetry:
-	rc = smb_init(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc, (void **) &pSMB,
-		      (void **) &pSMBr);
+	/*
+	 * Use smb_init_no_reconnect() instead of smb_init() as
+	 * CIFSGetDFSRefer() may be called from cifs_reconnect_tcon() and thus
+	 * causing an infinite recursion.
+	 */
+	rc = smb_init_no_reconnect(SMB_COM_TRANSACTION2, 15, ses->tcon_ipc,
+				   (void **)&pSMB, (void **)&pSMBr);
 	if (rc)
 		return rc;
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 6d13f8207e96..ace11a1a7c8a 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -441,13 +441,14 @@ out:
 * but there are some bugs that prevent rename from working if there are
 * multiple delimiters.
 *
- * Returns a sanitized duplicate of @path. The caller is responsible for
- * cleaning up the original.
+ * Returns a sanitized duplicate of @path. @gfp indicates the GFP_* flags
+ * for kstrdup.
+ * The caller is responsible for freeing the original.
 */
 #define IS_DELIM(c) ((c) == '/' || (c) == '\\')
-static char *sanitize_path(char *path)
+char *cifs_sanitize_prepath(char *prepath, gfp_t gfp)
 {
-	char *cursor1 = path, *cursor2 = path;
+	char *cursor1 = prepath, *cursor2 = prepath;

 	/* skip all prepended delimiters */
 	while (IS_DELIM(*cursor1))
@@ -469,7 +470,7 @@ static char *sanitize_path(char *path)
 	cursor2--;
 	*(cursor2) = '\0';

-	return kstrdup(path, GFP_KERNEL);
+	return kstrdup(prepath, gfp);
 }

 /*
@@ -531,7 +532,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
 	if (!*pos)
 		return 0;

-	ctx->prepath = sanitize_path(pos);
+	ctx->prepath = cifs_sanitize_prepath(pos, GFP_KERNEL);
 	if (!ctx->prepath)
 		return -ENOMEM;

diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
index 3de00e7127ec..f4eaf8558902 100644
--- a/fs/cifs/fs_context.h
+++ b/fs/cifs/fs_context.h
@@ -287,4 +287,7 @@ extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb);
 */
 #define SMB3_MAX_DCLOSETIMEO (1 << 30)
 #define SMB3_DEF_DCLOSETIMEO (1 * HZ) /* even 1 sec enough to help eg open/write/close/open/read */
+
+extern char *cifs_sanitize_prepath(char *prepath, gfp_t gfp);
+
 #endif
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index b44fb51968bf..7f085ed2d866 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1195,7 +1195,7 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
 	kfree(cifs_sb->prepath);

 	if (prefix && *prefix) {
-		cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
+		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
 		if (!cifs_sb->prepath)
 			return -ENOMEM;

diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 6bd2aa6af18f..4245249dbba8 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -310,7 +310,6 @@ out:
 	case SMB2_READ:
 	case SMB2_WRITE:
 	case SMB2_LOCK:
-	case SMB2_IOCTL:
 	case SMB2_QUERY_DIRECTORY:
 	case SMB2_CHANGE_NOTIFY:
 	case SMB2_QUERY_INFO:
@@ -588,11 +587,15 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,

 }

+/* If invalid preauth context warn but use what we requested, SHA-512 */
 static void decode_preauth_context(struct smb2_preauth_neg_context *ctxt)
 {
 	unsigned int len = le16_to_cpu(ctxt->DataLength);

-	/* If invalid preauth context warn but use what we requested, SHA-512 */
+	/*
+	 * Caller checked that DataLength remains within SMB boundary. We still
+	 * need to confirm that one HashAlgorithms member is accounted for.
+	 */
 	if (len < MIN_PREAUTH_CTXT_DATA_LEN) {
 		pr_warn_once("server sent bad preauth context\n");
 		return;
@@ -611,7 +614,11 @@ static void decode_compress_ctx(struct TCP_Server_Info *server,
 {
 	unsigned int len = le16_to_cpu(ctxt->DataLength);

-	/* sizeof compress context is a one element compression capbility struct */
+	/*
+	 * Caller checked that DataLength remains within SMB boundary. We still
+	 * need to confirm that one CompressionAlgorithms member is accounted
+	 * for.
+	 */
 	if (len < 10) {
 		pr_warn_once("server sent bad compression cntxt\n");
 		return;
@@ -633,6 +640,11 @@ static int decode_encrypt_ctx(struct TCP_Server_Info *server,
 	unsigned int len = le16_to_cpu(ctxt->DataLength);

 	cifs_dbg(FYI, "decode SMB3.11 encryption neg context of len %d\n", len);
+	/*
+	 * Caller checked that DataLength remains within SMB boundary. We still
+	 * need to confirm that one Cipher flexible array member is accounted
+	 * for.
+	 */
 	if (len < MIN_ENCRYPT_CTXT_DATA_LEN) {
 		pr_warn_once("server sent bad crypto ctxt len\n");
 		return -EINVAL;
@@ -679,6 +691,11 @@ static void decode_signing_ctx(struct TCP_Server_Info *server,
 {
 	unsigned int len = le16_to_cpu(pctxt->DataLength);

+	/*
+	 * Caller checked that DataLength remains within SMB boundary. We still
+	 * need to confirm that one SigningAlgorithms flexible array member is
+	 * accounted for.
+	 */
 	if ((len < 4) || (len > 16)) {
 		pr_warn_once("server sent bad signing negcontext\n");
 		return;
@@ -720,14 +737,19 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
 	for (i = 0; i < ctxt_cnt; i++) {
 		int clen;
 		/* check that offset is not beyond end of SMB */
-		if (len_of_ctxts == 0)
-			break;
-
 		if (len_of_ctxts < sizeof(struct smb2_neg_context))
 			break;

 		pctx = (struct smb2_neg_context *)(offset + (char *)rsp);
-		clen = le16_to_cpu(pctx->DataLength);
+		clen = sizeof(struct smb2_neg_context)
+			+ le16_to_cpu(pctx->DataLength);
+		/*
+		 * 2.2.4 SMB2 NEGOTIATE Response
+		 * Subsequent negotiate contexts MUST appear at the first 8-byte
+		 * aligned offset following the previous negotiate context.
+		 */
+		if (i + 1 != ctxt_cnt)
+			clen = ALIGN(clen, 8);
 		if (clen > len_of_ctxts)
 			break;

@@ -748,12 +770,10 @@ static int smb311_decode_neg_context(struct smb2_negotiate_rsp *rsp,
 		else
 			cifs_server_dbg(VFS, "unknown negcontext of type %d ignored\n",
 				le16_to_cpu(pctx->ContextType));
-
 		if (rc)
 			break;
-		/* offsets must be 8 byte aligned */
-		clen = ALIGN(clen, 8);
-		offset += clen + sizeof(struct smb2_neg_context);
+
+		offset += clen;
 		len_of_ctxts -= clen;
 	}
 	return rc;
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index 115a67d2cf78..365ac32af505 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -112,10 +112,8 @@ void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
 	struct ksmbd_conn *conn = work->conn;
 	struct list_head *requests_queue = NULL;

-	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
+	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
 		requests_queue = &conn->requests;
-		work->synchronous = true;
-	}

 	if (requests_queue) {
 		atomic_inc(&conn->req_running);
@@ -136,14 +134,14 @@ int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
 	if (!work->multiRsp)
 		atomic_dec(&conn->req_running);

-	spin_lock(&conn->request_lock);
 	if (!work->multiRsp) {
+		spin_lock(&conn->request_lock);
 		list_del_init(&work->request_entry);
-		if (!work->synchronous)
-			list_del_init(&work->async_request_entry);
+		spin_unlock(&conn->request_lock);
+
+		if (work->asynchronous)
+			release_async_work(work);
 		ret = 0;
 	}
-	spin_unlock(&conn->request_lock);

 	wake_up_all(&conn->req_running_q);
 	return ret;
@@ -326,10 +324,7 @@ int ksmbd_conn_handler_loop(void *p)

 		/* 4 for rfc1002 length field */
 		size = pdu_size + 4;
-		conn->request_buf = kvmalloc(size,
-					     GFP_KERNEL |
-					     __GFP_NOWARN |
-					     __GFP_NORETRY);
+		conn->request_buf = kvmalloc(size, GFP_KERNEL);
 		if (!conn->request_buf)
 			break;

diff --git a/fs/ksmbd/ksmbd_work.h b/fs/ksmbd/ksmbd_work.h
index 3234f2cf6327..f8ae6144c0ae 100644
--- a/fs/ksmbd/ksmbd_work.h
+++ b/fs/ksmbd/ksmbd_work.h
@@ -68,7 +68,7 @@ struct ksmbd_work {
 	/* Request is encrypted */
 	bool                            encrypted:1;
 	/* Is this SYNC or ASYNC ksmbd_work */
-	bool                            synchronous:1;
+	bool                            asynchronous:1;
 	bool                            need_invalidate_rkey:1;

 	unsigned int                    remote_key;
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index 394b6ceac431..0d8242789dc8 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -289,10 +289,7 @@ static int queue_ksmbd_work(struct ksmbd_conn *conn)
 	work->request_buf = conn->request_buf;
 	conn->request_buf = NULL;

-	if (ksmbd_init_smb_server(work)) {
-		ksmbd_free_work_struct(work);
-		return -EINVAL;
-	}
+	ksmbd_init_smb_server(work);

 	ksmbd_conn_enqueue_request(work);
 	atomic_inc(&conn->r_count);
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 97c9d1b5bcc0..67b7e766a06b 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -229,9 +229,6 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
 	struct smb2_negotiate_rsp *rsp;
 	struct ksmbd_conn *conn = work->conn;

-	if (conn->need_neg == false)
-		return -EINVAL;
-
 	*(__be32 *)work->response_buf =
 		cpu_to_be32(conn->vals->header_size);

@@ -498,12 +495,6 @@ int init_smb2_rsp_hdr(struct ksmbd_work *work)
 	rsp_hdr->SessionId = rcv_hdr->SessionId;
 	memcpy(rsp_hdr->Signature, rcv_hdr->Signature, 16);

-	work->synchronous = true;
-	if (work->async_id) {
-		ksmbd_release_id(&conn->async_ida, work->async_id);
-		work->async_id = 0;
-	}
-
 	return 0;
 }

@@ -644,7 +635,7 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
 		pr_err("Failed to alloc async message id\n");
 		return id;
 	}
-	work->synchronous = false;
+	work->asynchronous = true;
 	work->async_id = id;
 	rsp_hdr->Id.AsyncId = cpu_to_le64(id);

@@ -664,6 +655,24 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
 	return 0;
 }

+void release_async_work(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+
+	spin_lock(&conn->request_lock);
+	list_del_init(&work->async_request_entry);
+	spin_unlock(&conn->request_lock);
+
+	work->asynchronous = 0;
+	work->cancel_fn = NULL;
+	kfree(work->cancel_argv);
+	work->cancel_argv = NULL;
+	if (work->async_id) {
+		ksmbd_release_id(&conn->async_ida, work->async_id);
+		work->async_id = 0;
+	}
+}
+
 void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
 {
 	struct smb2_hdr *rsp_hdr;
@@ -867,17 +876,21 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
 }

 static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
-				  struct smb2_preauth_neg_context *pneg_ctxt)
+				  struct smb2_preauth_neg_context *pneg_ctxt,
+				  int len_of_ctxts)
 {
-	__le32 err = STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;
+	/*
+	 * sizeof(smb2_preauth_neg_context) assumes SMB311_SALT_SIZE Salt,
+	 * which may not be present. Only check for used HashAlgorithms[1].
+	 */
+	if (len_of_ctxts < MIN_PREAUTH_CTXT_DATA_LEN)
+		return STATUS_INVALID_PARAMETER;

-	if (pneg_ctxt->HashAlgorithms == SMB2_PREAUTH_INTEGRITY_SHA512) {
-		conn->preauth_info->Preauth_HashId =
-			SMB2_PREAUTH_INTEGRITY_SHA512;
-		err = STATUS_SUCCESS;
-	}
+	if (pneg_ctxt->HashAlgorithms != SMB2_PREAUTH_INTEGRITY_SHA512)
+		return STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP;

-	return err;
+	conn->preauth_info->Preauth_HashId = SMB2_PREAUTH_INTEGRITY_SHA512;
+	return STATUS_SUCCESS;
 }

 static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
@@ -1005,7 +1018,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
 				break;

 			status = decode_preauth_ctxt(conn,
-						     (struct smb2_preauth_neg_context *)pctx);
+						     (struct smb2_preauth_neg_context *)pctx,
+						     len_of_ctxts);
 			if (status != STATUS_SUCCESS)
 				break;
 		} else if (pctx->ContextType == SMB2_ENCRYPTION_CAPABILITIES) {
@@ -7045,13 +7059,9 @@ skip:

 				ksmbd_vfs_posix_lock_wait(flock);

-				spin_lock(&work->conn->request_lock);
 				spin_lock(&fp->f_lock);
 				list_del(&work->fp_entry);
-				work->cancel_fn = NULL;
-				kfree(argv);
 				spin_unlock(&fp->f_lock);
-				spin_unlock(&work->conn->request_lock);

 				if (work->state != KSMBD_WORK_ACTIVE) {
 					list_del(&smb_lock->llist);
@@ -7069,6 +7079,7 @@ skip:
 					work->send_no_response = 1;
 					goto out;
 				}
+				init_smb2_rsp_hdr(work);
 				smb2_set_err_rsp(work);
 				rsp->hdr.Status =
@@ -7081,7 +7092,7 @@ skip:
 				spin_lock(&work->conn->llist_lock);
 				list_del(&smb_lock->clist);
 				spin_unlock(&work->conn->llist_lock);
-
+				release_async_work(work);
 				goto retry;
 			} else if (!rc) {
 				spin_lock(&work->conn->llist_lock);
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
index 0c8a770fe318..9420dd2813fb 100644
--- a/fs/ksmbd/smb2pdu.h
+++ b/fs/ksmbd/smb2pdu.h
@@ -486,6 +486,7 @@ int find_matching_smb2_dialect(int start_index, __le16 *cli_dialects,
 struct file_lock *smb_flock_init(struct file *f);
 int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg);
+void release_async_work(struct ksmbd_work *work);
 void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status);
 struct channel *lookup_chann_list(struct ksmbd_session *sess,
 				  struct ksmbd_conn *conn);
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index 9c1ce6d199ce..af0c2a9b8529 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -283,20 +283,121 @@ err_out:
 	return BAD_PROT_ID;
 }

-int ksmbd_init_smb_server(struct ksmbd_work *work)
+#define SMB_COM_NEGOTIATE_EX	0x0
+
+/**
+ * get_smb1_cmd_val() - get smb command value from smb header
+ * @work:	smb work containing smb header
+ *
+ * Return:      smb command value
+ */
+static u16 get_smb1_cmd_val(struct ksmbd_work *work)
 {
-	struct ksmbd_conn *conn = work->conn;
+	return SMB_COM_NEGOTIATE_EX;
+}

-	if (conn->need_neg == false)
+/**
+ * init_smb1_rsp_hdr() - initialize smb negotiate response header
+ * @work:	smb work containing smb request
+ *
+ * Return:      0 on success, otherwise -EINVAL
+ */
+static int init_smb1_rsp_hdr(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
+
+	/*
+	 * Remove 4 byte direct TCP header.
+	 */
+	*(__be32 *)work->response_buf =
+		cpu_to_be32(sizeof(struct smb_hdr) - 4);
+
+	rsp_hdr->Command = SMB_COM_NEGOTIATE;
+	*(__le32 *)rsp_hdr->Protocol = SMB1_PROTO_NUMBER;
+	rsp_hdr->Flags = SMBFLG_RESPONSE;
+	rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
+		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
+	rsp_hdr->Pid = rcv_hdr->Pid;
+	rsp_hdr->Mid = rcv_hdr->Mid;
+	return 0;
+}
+
+/**
+ * smb1_check_user_session() - check for valid session for a user
+ * @work:	smb work containing smb request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+static int smb1_check_user_session(struct ksmbd_work *work)
+{
+	unsigned int cmd = work->conn->ops->get_cmd_val(work);
+
+	if (cmd == SMB_COM_NEGOTIATE_EX)
 		return 0;

-	init_smb3_11_server(conn);
+	return -EINVAL;
+}
+
+/**
+ * smb1_allocate_rsp_buf() - allocate response buffer for a command
+ * @work:	smb work containing smb request
+ *
+ * Return:      0 on success, otherwise -ENOMEM
+ */
+static int smb1_allocate_rsp_buf(struct ksmbd_work *work)
+{
+	work->response_buf = kmalloc(MAX_CIFS_SMALL_BUFFER_SIZE,
+			GFP_KERNEL | __GFP_ZERO);
+	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+
+	if (!work->response_buf) {
+		pr_err("Failed to allocate %u bytes buffer\n",
+				MAX_CIFS_SMALL_BUFFER_SIZE);
+		return -ENOMEM;
+	}

-	if (conn->ops->get_cmd_val(work) != SMB_COM_NEGOTIATE)
-		conn->need_neg = false;
 	return 0;
 }

+static struct smb_version_ops smb1_server_ops = {
+	.get_cmd_val = get_smb1_cmd_val,
+	.init_rsp_hdr = init_smb1_rsp_hdr,
+	.allocate_rsp_buf = smb1_allocate_rsp_buf,
+	.check_user_session = smb1_check_user_session,
+};
+
+static int smb1_negotiate(struct ksmbd_work *work)
+{
+	return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
+}
+
+static struct smb_version_cmds smb1_server_cmds[1] = {
+	[SMB_COM_NEGOTIATE_EX]	= { .proc = smb1_negotiate, },
+};
+
+static void init_smb1_server(struct ksmbd_conn *conn)
+{
+	conn->ops = &smb1_server_ops;
+	conn->cmds = smb1_server_cmds;
+	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
+}
+
+void ksmbd_init_smb_server(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	__le32 proto;
+
+	if (conn->need_neg == false)
+		return;
+
+	proto = *(__le32 *)((struct smb_hdr *)work->request_buf)->Protocol;
+	if (proto == SMB1_PROTO_NUMBER)
+		init_smb1_server(conn);
+	else
+		init_smb3_11_server(conn);
+}
+
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
 				      struct ksmbd_file *dir,
 				      struct ksmbd_dir_info *d_info,
@@ -444,20 +545,10 @@ static int smb_handle_negotiate(struct ksmbd_work *work)

 	ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");

-	/*
-	 * Remove 4 byte direct TCP header, add 2 byte bcc and
-	 * 2 byte DialectIndex.
-	 */
-	*(__be32 *)work->response_buf =
-		cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
+	/* Add 2 byte bcc and 2 byte DialectIndex. */
+	inc_rfc1001_len(work->response_buf, 4);
 	neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;

-	neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
-	*(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
-	neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
-	neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
-		SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
-	neg_rsp->hdr.WordCount = 1;
 	neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
 	neg_rsp->ByteCount = 0;

@@ -474,23 +565,12 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
 	ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);

 	if (command == SMB2_NEGOTIATE_HE) {
-		struct smb2_hdr *smb2_hdr = smb2_get_msg(work->request_buf);
-
-		if (smb2_hdr->ProtocolId != SMB2_PROTO_NUMBER) {
-			ksmbd_debug(SMB, "Downgrade to SMB1 negotiation\n");
-			command = SMB_COM_NEGOTIATE;
-		}
-	}
-
-	if (command == SMB2_NEGOTIATE_HE) {
 		ret = smb2_handle_negotiate(work);
-		init_smb2_neg_rsp(work);
 		return ret;
 	}

 	if (command == SMB_COM_NEGOTIATE) {
 		if (__smb2_negotiate(conn)) {
-			conn->need_neg = true;
 			init_smb3_11_server(conn);
 			init_smb2_neg_rsp(work);
 			ksmbd_debug(SMB, "Upgrade to SMB2 negotiation\n");
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index d30ce4c1a151..9130d2e3cd78 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -427,7 +427,7 @@ bool ksmbd_smb_request(struct ksmbd_conn *conn);

 int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count);

-int ksmbd_init_smb_server(struct ksmbd_work *work);
+void ksmbd_init_smb_server(struct ksmbd_work *work);

 struct ksmbd_kstat;
 int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work,
diff --git a/fs/ksmbd/unicode.c b/fs/ksmbd/unicode.c
index a0db699ddafd..9ae676906ed3 100644
--- a/fs/ksmbd/unicode.c
+++ b/fs/ksmbd/unicode.c
@@ -114,24 +114,6 @@ cp_convert:
 }

 /*
- * is_char_allowed() - check for valid character
- * @ch:	input character to be checked
- *
- * Return:	1 if char is allowed, otherwise 0
- */
-static inline int is_char_allowed(char *ch)
-{
-	/* check for control chars, wildcards etc. */
-	if (!(*ch & 0x80) &&
-	    (*ch <= 0x1f ||
-	     *ch == '?' || *ch == '"' || *ch == '<' ||
-	     *ch == '>' || *ch == '|'))
-		return 0;
-
-	return 1;
-}
-
-/*
 * smb_from_utf16() - convert utf16le string to local charset
 * @to:		destination buffer
 * @from:	source buffer
diff --git a/fs/namespace.c b/fs/namespace.c
index bc0f15257b49..6836e937ee61 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -4183,9 +4183,9 @@ out:
 	unlock_mount_hash();

 	if (kattr->propagation) {
-		namespace_unlock();
 		if (err)
 			cleanup_group_ids(mnt, NULL);
+		namespace_unlock();
 	}

 	return err;
diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
index e9a45dea748a..8a4c86687429 100644
--- a/fs/netfs/iterator.c
+++ b/fs/netfs/iterator.c
@@ -139,7 +139,7 @@ static ssize_t netfs_extract_user_to_sg(struct iov_iter *iter,
 			size_t seg = min_t(size_t, PAGE_SIZE - off, len);

 			*pages++ = NULL;
-			sg_set_page(sg, page, len, off);
+			sg_set_page(sg, page, seg, off);
 			sgtable->nents++;
 			sg++;
 			len -= seg;
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 450d6c3bc05e..c1c7ed2fd860 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -75,7 +75,6 @@ config NFS_V3_ACL
 config NFS_V4
 	tristate "NFS client support for NFS version 4"
 	depends on NFS_FS
-	select RPCSEC_GSS_KRB5
 	select KEYS
 	help
 	  This option enables support for version 4 of the NFS protocol
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 22a93ae46cd7..5607b1e2b821 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1980,8 +1980,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
 	if (!data->rpc_done) {
 		if (data->rpc_status)
 			return ERR_PTR(data->rpc_status);
-		/* cached opens have already been processed */
-		goto update;
+		return nfs4_try_open_cached(data);
 	}

 	ret = nfs_refresh_inode(inode, &data->f_attr);
@@ -1990,7 +1989,7 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)

 	if (data->o_res.delegation_type != 0)
 		nfs4_opendata_check_deleg(data, state);
-update:
+
 	if (!update_open_stateid(state, &data->o_res.stateid,
 				NULL, data->o_arg.fmode))
 		return ERR_PTR(-EAGAIN);
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index 04697f8dc37d..01d7fd108cf3 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -297,6 +297,7 @@ nfsd4_block_get_device_info_scsi(struct super_block *sb,

out_free_dev:
 	kfree(dev);
+	gdp->gd_device = NULL;
 	return ret;
 }

diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 2a815f5a52c4..4039ffcf90ba 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -946,8 +946,8 @@ static const struct cred *get_backchannel_cred(struct nfs4_client *clp, struct r
 		if (!kcred)
 			return NULL;

-		kcred->uid = ses->se_cb_sec.uid;
-		kcred->gid = ses->se_cb_sec.gid;
+		kcred->fsuid = ses->se_cb_sec.uid;
+		kcred->fsgid = ses->se_cb_sec.gid;
 		return kcred;
 	}
 }
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index e12e5a4ad502..e2e485167ac4 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2476,10 +2476,12 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
 	for (i = 0; i < argp->opcnt; i++) {
 		op = &argp->ops[i];
 		op->replay = NULL;
+		op->opdesc = NULL;

 		if (xdr_stream_decode_u32(argp->xdr, &op->opnum) < 0)
 			return false;
 		if (nfsd4_opnum_in_range(argp, op)) {
+			op->opdesc = OPDESC(op);
 			op->status = nfsd4_dec_ops[op->opnum](argp, &op->u);
 			if (op->status != nfs_ok)
 				trace_nfsd_compound_decode_err(argp->rqstp,
@@ -2490,7 +2492,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
 			op->opnum = OP_ILLEGAL;
 			op->status = nfserr_op_illegal;
 		}
-		op->opdesc = OPDESC(op);
+
 		/*
 		 * We'll try to cache the result in the DRC if any one
 		 * op in the compound wants to be cached:
@@ -5400,10 +5402,8 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
 	__be32 *p;

 	p = xdr_reserve_space(xdr, 8);
-	if (!p) {
-		WARN_ON_ONCE(1);
-		return;
-	}
+	if (!p)
+		goto release;
 	*p++ = cpu_to_be32(op->opnum);
 	post_err_offset = xdr->buf->len;

@@ -5418,8 +5418,6 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
 	op->status = encoder(resp, op->status, &op->u);
 	if (op->status)
 		trace_nfsd_compound_encode_err(rqstp, op->opnum, op->status);
-	if (opdesc && opdesc->op_release)
-		opdesc->op_release(&op->u);
 	xdr_commit_encode(xdr);

 	/* nfsd4_check_resp_size guarantees enough room for error status */
@@ -5460,6 +5458,9 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
 	}
status:
 	*p = op->status;
+release:
+	if (opdesc && opdesc->op_release)
+		opdesc->op_release(&op->u);
 }

 /*
diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c
index 617e4f9db42e..132f01d3461f 100644
--- a/fs/zonefs/file.c
+++ b/fs/zonefs/file.c
@@ -382,6 +382,7 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 	struct zonefs_zone *z = zonefs_inode_zone(inode);
 	struct block_device *bdev = inode->i_sb->s_bdev;
 	unsigned int max = bdev_max_zone_append_sectors(bdev);
+	pgoff_t start, end;
 	struct bio *bio;
 	ssize_t size = 0;
 	int nr_pages;
@@ -390,6 +391,19 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 	max = ALIGN_DOWN(max << SECTOR_SHIFT, inode->i_sb->s_blocksize);
 	iov_iter_truncate(from, max);

+	/*
+	 * If the inode block size (zone write granularity) is smaller than the
+	 * page size, we may be appending data belonging to the last page of the
+	 * inode straddling inode->i_size, with that page already cached due to
+	 * a buffered read or readahead. So make sure to invalidate that page.
+	 * This will always be a no-op for the case where the block size is
+	 * equal to the page size.
+	 */
+	start = iocb->ki_pos >> PAGE_SHIFT;
+	end = (iocb->ki_pos + iov_iter_count(from) - 1) >> PAGE_SHIFT;
+	if (invalidate_inode_pages2_range(inode->i_mapping, start, end))
+		return -EBUSY;
+
 	nr_pages = iov_iter_npages(from, BIO_MAX_VECS);
 	if (!nr_pages)
 		return 0;
@@ -567,11 +581,21 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
 		append = sync;
 	}

-	if (append)
+	if (append) {
 		ret = zonefs_file_dio_append(iocb, from);
-	else
+	} else {
+		/*
+		 * iomap_dio_rw() may return ENOTBLK if there was an issue with
+		 * page invalidation. Overwrite that error code with EBUSY to
+		 * be consistent with zonefs_file_dio_append() return value for
+		 * similar issues.
+		 */
 		ret = iomap_dio_rw(iocb, from, &zonefs_write_iomap_ops,
 				   &zonefs_write_dio_ops, 0, NULL, 0);
+		if (ret == -ENOTBLK)
+			ret = -EBUSY;
+	}
+
 	if (zonefs_zone_is_seq(z) && (ret > 0 || ret == -EIOCBQUEUED)) {
 		if (ret > 0)