author     Al Viro <viro@zeniv.linux.org.uk>  2018-11-28 04:32:59 +0100
committer  Al Viro <viro@zeniv.linux.org.uk>  2018-11-28 04:35:08 +0100
commit     f91528955d0094ff2200632661d62ee64019c985 (patch)
tree       ba4bf0f9187672c911f2297f86a536b3b74e45e0
parent     iov_iter: teach csum_and_copy_to_iter() to handle pipe-backed ones (diff)
iov_iter: reduce code duplication
The same combination of csum_partial_copy_nocheck() with csum_block_add() is used in a bunch of places. Add a helper doing just that and use it.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
-rw-r--r--   lib/iov_iter.c | 54
1 file changed, 27 insertions(+), 27 deletions(-)
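For readers skimming the diff below: the duplication being removed is the two-step "copy while checksumming, then fold the partial checksum into the running one at the current offset" pattern. What follows is a minimal, compilable userspace sketch of that shape; csum_partial_copy_nocheck() and csum_block_add() here are simplified stand-ins that only model the behavior of the kernel helpers of the same names (not the real arch-optimized 1's-complement routines), while csum_and_memcpy() mirrors the helper this patch adds.

/*
 * Userspace sketch only: stand-in helpers that model the shape of the
 * kernel's csum_partial_copy_nocheck() / csum_block_add() pair, not
 * their real (arch-optimized, 1's-complement) implementations.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t wsum_t;	/* stand-in for the kernel's __wsum */

/* Copy @len bytes from @from to @to while accumulating a toy checksum. */
static wsum_t csum_partial_copy_nocheck(const void *from, void *to,
					size_t len, wsum_t sum)
{
	const uint8_t *src = from;
	size_t i;

	memcpy(to, from, len);
	for (i = 0; i < len; i++)
		sum += (i & 1) ? src[i] : (wsum_t)src[i] << 8;
	return sum;
}

/* Fold @next into @sum; swap bytes when the chunk began at an odd offset. */
static wsum_t csum_block_add(wsum_t sum, wsum_t next, size_t off)
{
	if (off & 1)
		next = ((next & 0x00ff00ff) << 8) | ((next >> 8) & 0x00ff00ff);
	return sum + next;
}

/*
 * The helper this patch introduces: one call per site instead of the
 * csum_partial_copy_nocheck() + csum_block_add() pair and a 'next' temporary.
 */
static wsum_t csum_and_memcpy(void *to, const void *from, size_t len,
			      wsum_t sum, size_t off)
{
	wsum_t next = csum_partial_copy_nocheck(from, to, len, 0);

	return csum_block_add(sum, next, off);
}

int main(void)
{
	const char msg[] = "example payload";
	char dst[sizeof(msg)];
	wsum_t sum = 0;

	/* Before the patch, every caller open-coded the two-step version. */
	sum = csum_and_memcpy(dst, msg, sizeof(msg), sum, 0);
	printf("copied \"%s\", checksum 0x%08x\n", dst, (unsigned)sum);
	return 0;
}

With the helper in place, each call site collapses from the two-statement pair plus a 'next' temporary into a single assignment to 'sum', which is exactly what the hunks below do.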
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 54c248526b55..621984743268 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -560,13 +560,20 @@ static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
 	return bytes;
 }
 
+static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
+			      __wsum sum, size_t off)
+{
+	__wsum next = csum_partial_copy_nocheck(from, to, len, 0);
+	return csum_block_add(sum, next, off);
+}
+
 static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 				__wsum *csum, struct iov_iter *i)
 {
 	struct pipe_inode_info *pipe = i->pipe;
 	size_t n, r;
 	size_t off = 0;
-	__wsum sum = *csum, next;
+	__wsum sum = *csum;
 	int idx;
 
 	if (!sanity(i))
@@ -578,8 +585,7 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 	for ( ; n; idx = next_idx(idx, pipe), r = 0) {
 		size_t chunk = min_t(size_t, n, PAGE_SIZE - r);
 		char *p = kmap_atomic(pipe->bufs[idx].page);
-		next = csum_partial_copy_nocheck(addr, p + r, chunk, 0);
-		sum = csum_block_add(sum, next, off);
+		sum = csum_and_memcpy(p + r, addr, chunk, sum, off);
 		kunmap_atomic(p);
 		i->idx = idx;
 		i->iov_offset = r + chunk;
@@ -1400,17 +1406,15 @@ size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 		err ? v.iov_len : 0;
 	}), ({
 		char *p = kmap_atomic(v.bv_page);
-		next = csum_partial_copy_nocheck(p + v.bv_offset,
-						 (to += v.bv_len) - v.bv_len,
-						 v.bv_len, 0);
+		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+				      p + v.bv_offset, v.bv_len,
+				      sum, off);
 		kunmap_atomic(p);
-		sum = csum_block_add(sum, next, off);
 		off += v.bv_len;
 	}),({
-		next = csum_partial_copy_nocheck(v.iov_base,
-						 (to += v.iov_len) - v.iov_len,
-						 v.iov_len, 0);
-		sum = csum_block_add(sum, next, off);
+		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
+				      v.iov_base, v.iov_len,
+				      sum, off);
 		off += v.iov_len;
 	})
 	)
@@ -1444,17 +1448,15 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
 		0;
 	}), ({
 		char *p = kmap_atomic(v.bv_page);
-		next = csum_partial_copy_nocheck(p + v.bv_offset,
-						 (to += v.bv_len) - v.bv_len,
-						 v.bv_len, 0);
+		sum = csum_and_memcpy((to += v.bv_len) - v.bv_len,
+				      p + v.bv_offset, v.bv_len,
+				      sum, off);
 		kunmap_atomic(p);
-		sum = csum_block_add(sum, next, off);
 		off += v.bv_len;
 	}),({
-		next = csum_partial_copy_nocheck(v.iov_base,
-						 (to += v.iov_len) - v.iov_len,
-						 v.iov_len, 0);
-		sum = csum_block_add(sum, next, off);
+		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
+				      v.iov_base, v.iov_len,
+				      sum, off);
 		off += v.iov_len;
 	})
 	)
@@ -1491,17 +1493,15 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
 		err ? v.iov_len : 0;
 	}), ({
 		char *p = kmap_atomic(v.bv_page);
-		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
-						 p + v.bv_offset,
-						 v.bv_len, 0);
+		sum = csum_and_memcpy(p + v.bv_offset,
+				      (from += v.bv_len) - v.bv_len,
+				      v.bv_len, sum, off);
 		kunmap_atomic(p);
-		sum = csum_block_add(sum, next, off);
 		off += v.bv_len;
 	}),({
-		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
-						 v.iov_base,
-						 v.iov_len, 0);
-		sum = csum_block_add(sum, next, off);
+		sum = csum_and_memcpy(v.iov_base,
+				      (from += v.iov_len) - v.iov_len,
+				      v.iov_len, sum, off);
 		off += v.iov_len;
 	})
 	)