author	Al Viro <viro@zeniv.linux.org.uk>	2022-06-14 16:24:37 +0200
committer	Al Viro <viro@zeniv.linux.org.uk>	2022-08-09 04:37:16 +0200
commit	2dcedb2a549a4d7430538213b1b28ef7271bc0aa (patch)
tree	d6af7d3df8f6c016b030e119dda9ce915f421a2e /lib
parent	splice: stop abusing iov_iter_advance() to flush a pipe (diff)
ITER_PIPE: helper for getting pipe buffer by index
pipe_buffer instances of a pipe are organized as a ring buffer, with power-of-2 size. Indices are kept *not* reduced modulo ring size, so the buffer referred to by index N is pipe->bufs[N & (pipe->ring_size - 1)].

Ring size can change over the lifetime of a pipe, but not while the pipe is locked, so for any iov_iter primitive it is a constant.

The original conversion of pipes to this layout went overboard trying to microoptimize that - calculating pipe->ring_size - 1, storing it in a local variable and using it throughout the function. In some cases that might be warranted, but most of the time it only obfuscates what's going on.

Introduce a helper (pipe_buf(pipe, N)) that encapsulates that and use it in the obvious cases. More will follow...

Reviewed-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Christian Brauner (Microsoft) <brauner@kernel.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
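As an aside (not part of the commit): the masking in the helper relies on the ring size being a power of two, so N & (ring_size - 1) gives the same slot as N % ring_size even after the unreduced index has wrapped far past the ring size. A minimal standalone sketch of that identity, using a made-up RING_SIZE value purely for illustration:

	#include <stdio.h>

	/* Hypothetical stand-in for pipe->ring_size; must be a power of two. */
	#define RING_SIZE 8u

	int main(void)
	{
		/* Indices are never reduced modulo the ring size, so they keep
		 * growing past RING_SIZE; the mask picks the actual slot. */
		for (unsigned int n = 5; n < 20; n++) {
			unsigned int slot = n & (RING_SIZE - 1);
			printf("index %2u -> slot %u (same as %%: %u)\n",
			       n, slot, n % RING_SIZE);
		}
		return 0;
	}

Because the ring size cannot change while the pipe is locked, the helper can recompute the mask on every call; the per-function p_mask local the diff below removes bought nothing but noise.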
Diffstat (limited to 'lib')
-rw-r--r--	lib/iov_iter.c	15
1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index b3493d20536e..048026d5aa0d 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -183,13 +183,18 @@ static int copyin(void *to, const void __user *from, size_t n)
 	return n;
 }
 
+static inline struct pipe_buffer *pipe_buf(const struct pipe_inode_info *pipe,
+					    unsigned int slot)
+{
+	return &pipe->bufs[slot & (pipe->ring_size - 1)];
+}
+
 #ifdef PIPE_PARANOIA
 static bool sanity(const struct iov_iter *i)
 {
 	struct pipe_inode_info *pipe = i->pipe;
 	unsigned int p_head = pipe->head;
 	unsigned int p_tail = pipe->tail;
-	unsigned int p_mask = pipe->ring_size - 1;
 	unsigned int p_occupancy = pipe_occupancy(p_head, p_tail);
 	unsigned int i_head = i->head;
 	unsigned int idx;
@@ -201,7 +206,7 @@ static bool sanity(const struct iov_iter *i)
 		if (unlikely(i_head != p_head - 1))
 			goto Bad;	// must be at the last buffer...
 
-		p = &pipe->bufs[i_head & p_mask];
+		p = pipe_buf(pipe, i_head);
 		if (unlikely(p->offset + p->len != i->iov_offset))
 			goto Bad;	// ... at the end of segment
 	} else {
@@ -386,11 +391,10 @@ static inline bool allocated(struct pipe_buffer *buf)
 static inline void data_start(const struct iov_iter *i,
 			      unsigned int *iter_headp, size_t *offp)
 {
-	unsigned int p_mask = i->pipe->ring_size - 1;
 	unsigned int iter_head = i->head;
 	size_t off = i->iov_offset;
 
-	if (off && (!allocated(&i->pipe->bufs[iter_head & p_mask]) ||
+	if (off && (!allocated(pipe_buf(i->pipe, iter_head)) ||
 	    off == PAGE_SIZE)) {
 		iter_head++;
 		off = 0;
@@ -1280,10 +1284,9 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 		return iov_iter_alignment_bvec(i);
 
 	if (iov_iter_is_pipe(i)) {
-		unsigned int p_mask = i->pipe->ring_size - 1;
 		size_t size = i->count;
 
-		if (size && i->iov_offset && allocated(&i->pipe->bufs[i->head & p_mask]))
+		if (size && i->iov_offset && allocated(pipe_buf(i->pipe, i->head)))
 			return size | i->iov_offset;
 		return size;
 	}