author    Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 22:42:26 +0100
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-12-16 22:42:26 +0100
commit    009bd55dfcc857d8b00a5bbb17a8db060317af6f (patch)
tree      3a623fc690ea03bd76630c5bcc003324136ae0f6 /tools
parent    Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi (diff)
parent    RDMA/cma: Don't overwrite sgid_attr after device is released (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "A smaller set of patches, nothing stands out as being particularly
  major this cycle. The biggest item would be the new HIP09 HW support
  from HNS, otherwise it was pretty quiet for new work here:

  - Driver bug fixes and updates: bnxt_re, cxgb4, rxe, hns, i40iw,
    mlx4 and mlx5

  - Bug fixes and polishing for the new rtrs ULP

  - Cleanup of uverbs checking for allowed driver operations

  - Use sysfs_emit all over the place

  - Lots of bug fixes and clarity improvements for hns

  - hip09 support for hns

  - NDR and 50/100Gb signaling rates

  - Remove dma_virt_ops and go back to using the IB DMA wrappers

  - mlx5 optimizations for contiguous DMA regions"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (147 commits)
  RDMA/cma: Don't overwrite sgid_attr after device is released
  RDMA/mlx5: Fix MR cache memory leak
  RDMA/rxe: Use acquire/release for memory ordering
  RDMA/hns: Simplify AEQE process for different types of queue
  RDMA/hns: Fix inaccurate prints
  RDMA/hns: Fix incorrect symbol types
  RDMA/hns: Clear redundant variable initialization
  RDMA/hns: Fix coding style issues
  RDMA/hns: Remove unnecessary access right set during INIT2INIT
  RDMA/hns: WARN_ON if get a reserved sl from users
  RDMA/hns: Avoid filling sl in high 3 bits of vlan_id
  RDMA/hns: Do shift on traffic class when using RoCEv2
  RDMA/hns: Normalization the judgment of some features
  RDMA/hns: Limit the length of data copied between kernel and userspace
  RDMA/mlx4: Remove bogus dev_base_lock usage
  RDMA/uverbs: Fix incorrect variable type
  RDMA/core: Do not indicate device ready when device enablement fails
  RDMA/core: Clean up cq pool mechanism
  RDMA/core: Update kernel documentation for ib_create_named_qp()
  MAINTAINERS: SOFT-ROCE: Change Zhu Yanjun's email address
  ...
Diffstat (limited to 'tools')
 tools/testing/scatterlist/main.c | 64
 1 file changed, 41 insertions(+), 23 deletions(-)
diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c
index 8a577c95496e..71c960dcd8a4 100644
--- a/tools/testing/scatterlist/main.c
+++ b/tools/testing/scatterlist/main.c
@@ -9,6 +9,7 @@ struct test {
int alloc_ret;
unsigned num_pages;
unsigned *pfn;
+ unsigned *pfn_app;
unsigned size;
unsigned int max_seg;
unsigned int expected_segments;
@@ -52,31 +53,39 @@ int main(void)
{
const unsigned int sgmax = UINT_MAX;
struct test *test, tests[] = {
- { -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },
- { 0, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
- { 0, 1, pfn(0), PAGE_SIZE, sgmax + 1, 1 },
- { 0, 1, pfn(0), PAGE_SIZE, sgmax, 1 },
- { 0, 1, pfn(0), 1, sgmax, 1 },
- { 0, 2, pfn(0, 1), 2 * PAGE_SIZE, sgmax, 1 },
- { 0, 2, pfn(1, 0), 2 * PAGE_SIZE, sgmax, 2 },
- { 0, 3, pfn(0, 1, 2), 3 * PAGE_SIZE, sgmax, 1 },
- { 0, 3, pfn(0, 2, 1), 3 * PAGE_SIZE, sgmax, 3 },
- { 0, 3, pfn(0, 1, 3), 3 * PAGE_SIZE, sgmax, 2 },
- { 0, 3, pfn(1, 2, 4), 3 * PAGE_SIZE, sgmax, 2 },
- { 0, 3, pfn(1, 3, 4), 3 * PAGE_SIZE, sgmax, 2 },
- { 0, 4, pfn(0, 1, 3, 4), 4 * PAGE_SIZE, sgmax, 2 },
- { 0, 5, pfn(0, 1, 3, 4, 5), 5 * PAGE_SIZE, sgmax, 2 },
- { 0, 5, pfn(0, 1, 3, 4, 6), 5 * PAGE_SIZE, sgmax, 3 },
- { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, sgmax, 1 },
- { 0, 5, pfn(0, 1, 2, 3, 4), 5 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
- { 0, 6, pfn(0, 1, 2, 3, 4, 5), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
- { 0, 6, pfn(0, 2, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 4 },
- { 0, 6, pfn(0, 1, 3, 4, 5, 6), 6 * PAGE_SIZE, 2 * PAGE_SIZE, 3 },
- { 0, 0, NULL, 0, 0, 0 },
+ { -EINVAL, 1, pfn(0), NULL, PAGE_SIZE, 0, 1 },
+ { 0, 1, pfn(0), NULL, PAGE_SIZE, PAGE_SIZE + 1, 1 },
+ { 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax + 1, 1 },
+ { 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax, 1 },
+ { 0, 1, pfn(0), NULL, 1, sgmax, 1 },
+ { 0, 2, pfn(0, 1), NULL, 2 * PAGE_SIZE, sgmax, 1 },
+ { 0, 2, pfn(1, 0), NULL, 2 * PAGE_SIZE, sgmax, 2 },
+ { 0, 3, pfn(0, 1, 2), NULL, 3 * PAGE_SIZE, sgmax, 1 },
+ { 0, 3, pfn(0, 1, 2), NULL, 3 * PAGE_SIZE, sgmax, 1 },
+ { 0, 3, pfn(0, 1, 2), pfn(3, 4, 5), 3 * PAGE_SIZE, sgmax, 1 },
+ { 0, 3, pfn(0, 1, 2), pfn(4, 5, 6), 3 * PAGE_SIZE, sgmax, 2 },
+ { 0, 3, pfn(0, 2, 1), NULL, 3 * PAGE_SIZE, sgmax, 3 },
+ { 0, 3, pfn(0, 1, 3), NULL, 3 * PAGE_SIZE, sgmax, 2 },
+ { 0, 3, pfn(1, 2, 4), NULL, 3 * PAGE_SIZE, sgmax, 2 },
+ { 0, 3, pfn(1, 3, 4), NULL, 3 * PAGE_SIZE, sgmax, 2 },
+ { 0, 4, pfn(0, 1, 3, 4), NULL, 4 * PAGE_SIZE, sgmax, 2 },
+ { 0, 5, pfn(0, 1, 3, 4, 5), NULL, 5 * PAGE_SIZE, sgmax, 2 },
+ { 0, 5, pfn(0, 1, 3, 4, 6), NULL, 5 * PAGE_SIZE, sgmax, 3 },
+ { 0, 5, pfn(0, 1, 2, 3, 4), NULL, 5 * PAGE_SIZE, sgmax, 1 },
+ { 0, 5, pfn(0, 1, 2, 3, 4), NULL, 5 * PAGE_SIZE, 2 * PAGE_SIZE,
+   3 },
+ { 0, 6, pfn(0, 1, 2, 3, 4, 5), NULL, 6 * PAGE_SIZE,
+   2 * PAGE_SIZE, 3 },
+ { 0, 6, pfn(0, 2, 3, 4, 5, 6), NULL, 6 * PAGE_SIZE,
+   2 * PAGE_SIZE, 4 },
+ { 0, 6, pfn(0, 1, 3, 4, 5, 6), pfn(7, 8, 9, 10, 11, 12),
+   6 * PAGE_SIZE, 12 * PAGE_SIZE, 2 },
+ { 0, 0, NULL, NULL, 0, 0, 0 },
};
unsigned int i;
for (i = 0, test = tests; test->expected_segments; test++, i++) {
+ int left_pages = test->pfn_app ? test->num_pages : 0;
struct page *pages[MAX_PAGES];
struct sg_table st;
struct scatterlist *sg;
@@ -84,14 +93,23 @@ int main(void)
set_pages(pages, test->pfn, test->num_pages);
sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0,
- test->size, test->max_seg, NULL, 0, GFP_KERNEL);
+ test->size, test->max_seg, NULL, left_pages, GFP_KERNEL);
assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret);
if (test->alloc_ret)
continue;
+ if (test->pfn_app) {
+ set_pages(pages, test->pfn_app, test->num_pages);
+ sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0,
+ test->size, test->max_seg, sg, 0, GFP_KERNEL);
+
+ assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret);
+ }
+
VALIDATE(st.nents == test->expected_segments, &st, test);
- VALIDATE(st.orig_nents == test->expected_segments, &st, test);
+ if (!test->pfn_app)
+   VALIDATE(st.orig_nents == test->expected_segments, &st, test);
sg_free_table(&st);
}
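
For reference, the calling convention this test now exercises is the appended-pages form of __sg_alloc_table_from_pages(): the first call passes a non-zero left_pages hint so the table leaves room for pages still to come, and a later call continues from the scatterlist returned by the previous one via the prv argument. Below is a minimal kernel-side sketch of that flow, mirroring the two calls in the test above; the function name, page arrays and batch_size here are made up for illustration and are not part of the patch.

/*
 * Sketch only: build one sg_table from two batches of pages by chaining
 * __sg_alloc_table_from_pages() calls, the same way the updated test does.
 * build_appended_table(), first/second and batch_size are hypothetical names.
 */
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

static int build_appended_table(struct sg_table *sgt,
				struct page **first, unsigned int n_first,
				struct page **second, unsigned int n_second,
				unsigned long batch_size)
{
	struct scatterlist *sg;

	/* First batch: announce that n_second pages will be appended later. */
	sg = __sg_alloc_table_from_pages(sgt, first, n_first, 0, batch_size,
					 UINT_MAX, NULL, n_second, GFP_KERNEL);
	if (IS_ERR(sg))
		return PTR_ERR(sg);

	/* Second batch: continue from the returned scatterlist, nothing left. */
	sg = __sg_alloc_table_from_pages(sgt, second, n_second, 0, batch_size,
					 UINT_MAX, sg, 0, GFP_KERNEL);
	if (IS_ERR(sg)) {
		sg_free_table(sgt);
		return PTR_ERR(sg);
	}

	return 0;
}

The new pfn_app entries in the test table drive exactly this two-call pattern; note that the diff only validates orig_nents for the single-call cases.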