Diffstat (limited to 'drivers/gpu/drm/i915/selftests')
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_buddy.c		|  48
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_gem_gtt.c		|  97
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_request.c		| 211
-rw-r--r--	drivers/gpu/drm/i915/selftests/i915_vma.c		| 219
-rw-r--r--	drivers/gpu/drm/i915/selftests/igt_spinner.c		| 136
-rw-r--r--	drivers/gpu/drm/i915/selftests/igt_spinner.h		|   5
-rw-r--r--	drivers/gpu/drm/i915/selftests/intel_memory_region.c	|  95
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_gtt.c		|   2
-rw-r--r--	drivers/gpu/drm/i915/selftests/mock_region.c		|   4
9 files changed, 668 insertions, 149 deletions
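The bulk of the diff below converts the selftests from the implicitly locked helpers (i915_gem_object_pin_pages(), i915_gem_object_pin_map()) to their *_unlocked variants, and wraps page-table allocation in a ww acquire context so that contention is handled by backing off and retrying. A minimal sketch of that retry idiom, assuming only the functions visible in the diff; do_locked_work() is a hypothetical stand-in for the work done under the acquired locks:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);	/* false: non-interruptible waits */
retry:
	err = i915_vm_lock_objects(vm, &ww);	/* acquire all backing objects of the vm */
	if (!err)
		err = do_locked_work(vm);	/* hypothetical: work requiring the locks */
	if (err == -EDEADLK) {
		/* drop every held lock, sleep on the contended one, then retry */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);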
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 632b912b0bc9..f0f5c4df8dbc 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -727,6 +727,53 @@ err_fini:
 	return err;
 }
 
+static int igt_buddy_alloc_limit(void *arg)
+{
+	struct i915_buddy_block *block;
+	struct i915_buddy_mm mm;
+	const u64 size = U64_MAX;
+	int err;
+
+	err = i915_buddy_init(&mm, size, PAGE_SIZE);
+	if (err)
+		return err;
+
+	if (mm.max_order != I915_BUDDY_MAX_ORDER) {
+		pr_err("mm.max_order(%d) != %d\n",
+		       mm.max_order, I915_BUDDY_MAX_ORDER);
+		err = -EINVAL;
+		goto out_fini;
+	}
+
+	block = i915_buddy_alloc(&mm, mm.max_order);
+	if (IS_ERR(block)) {
+		err = PTR_ERR(block);
+		goto out_fini;
+	}
+
+	if (i915_buddy_block_order(block) != mm.max_order) {
+		pr_err("block order(%d) != %d\n",
+		       i915_buddy_block_order(block), mm.max_order);
+		err = -EINVAL;
+		goto out_free;
+	}
+
+	if (i915_buddy_block_size(&mm, block) !=
+	    BIT_ULL(mm.max_order) * PAGE_SIZE) {
+		pr_err("block size(%llu) != %llu\n",
+		       i915_buddy_block_size(&mm, block),
+		       BIT_ULL(mm.max_order) * PAGE_SIZE);
+		err = -EINVAL;
+		goto out_free;
+	}
+
+out_free:
+	i915_buddy_free(&mm, block);
+out_fini:
+	i915_buddy_fini(&mm);
+	return err;
+}
+
 int i915_buddy_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -735,6 +782,7 @@ int i915_buddy_mock_selftests(void)
 		SUBTEST(igt_buddy_alloc_pathological),
 		SUBTEST(igt_buddy_alloc_smoke),
 		SUBTEST(igt_buddy_alloc_range),
+		SUBTEST(igt_buddy_alloc_limit),
 	};
 
 	return i915_subtests(tests, NULL);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index c1adea8765a9..45c6c0107c7c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -121,7 +121,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 		goto err;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &fake_ops, &lock_class);
+	i915_gem_object_init(obj, &fake_ops, &lock_class, 0);
 
 	i915_gem_object_set_volatile(obj);
 
@@ -130,7 +130,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 	obj->cache_level = I915_CACHE_NONE;
 
 	/* Preallocate the "backing storage" */
-	if (i915_gem_object_pin_pages(obj))
+	if (i915_gem_object_pin_pages_unlocked(obj))
 		goto err_obj;
 
 	i915_gem_object_unpin_pages(obj);
@@ -146,6 +146,7 @@ static int igt_ppgtt_alloc(void *arg)
 {
 	struct drm_i915_private *dev_priv = arg;
 	struct i915_ppgtt *ppgtt;
+	struct i915_gem_ww_ctx ww;
 	u64 size, last, limit;
 	int err = 0;
 
@@ -171,6 +172,12 @@ static int igt_ppgtt_alloc(void *arg)
 	limit = totalram_pages() << PAGE_SHIFT;
 	limit = min(ppgtt->vm.total, limit);
 
+	i915_gem_ww_ctx_init(&ww, false);
+retry:
+	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
+	if (err)
+		goto err_ppgtt_cleanup;
+
 	/* Check we can allocate the entire range */
 	for (size = 4096; size <= limit; size <<= 2) {
 		struct i915_vm_pt_stash stash = {};
@@ -215,6 +222,13 @@ static int igt_ppgtt_alloc(void *arg)
 	}
 
 err_ppgtt_cleanup:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
 	i915_vm_put(&ppgtt->vm);
 	return err;
 }
@@ -276,7 +290,7 @@ static int lowlevel_hole(struct i915_address_space *vm,
 
 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
 
-		if (i915_gem_object_pin_pages(obj)) {
+		if (i915_gem_object_pin_pages_unlocked(obj)) {
 			i915_gem_object_put(obj);
 			kfree(order);
 			break;
@@ -297,20 +311,36 @@ static int lowlevel_hole(struct i915_address_space *vm,
 			if (vm->allocate_va_range) {
 				struct i915_vm_pt_stash stash = {};
+				struct i915_gem_ww_ctx ww;
+				int err;
+
+				i915_gem_ww_ctx_init(&ww, false);
+retry:
+				err = i915_vm_lock_objects(vm, &ww);
+				if (err)
+					goto alloc_vm_end;
 
+				err = -ENOMEM;
 				if (i915_vm_alloc_pt_stash(vm, &stash,
 							   BIT_ULL(size)))
-					break;
-
-				if (i915_vm_pin_pt_stash(vm, &stash)) {
-					i915_vm_free_pt_stash(vm, &stash);
-					break;
-				}
+					goto alloc_vm_end;
 
-				vm->allocate_va_range(vm, &stash,
-						      addr, BIT_ULL(size));
+				err = i915_vm_pin_pt_stash(vm, &stash);
+				if (!err)
+					vm->allocate_va_range(vm, &stash,
+							      addr, BIT_ULL(size));
 
 				i915_vm_free_pt_stash(vm, &stash);
+alloc_vm_end:
+				if (err == -EDEADLK) {
+					err = i915_gem_ww_ctx_backoff(&ww);
+					if (!err)
+						goto retry;
+				}
+				i915_gem_ww_ctx_fini(&ww);
+
+				if (err)
+					break;
 			}
 
 			mock_vma->pages = obj->mm.pages;
@@ -1076,7 +1106,8 @@ static int igt_ppgtt_shrink_boom(void *arg)
 	return exercise_ppgtt(arg, shrink_boom);
 }
 
-static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
+static int sort_holes(void *priv, const struct list_head *A,
+		      const struct list_head *B)
 {
 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
@@ -1166,7 +1197,7 @@ static int igt_ggtt_page(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_pin_pages_unlocked(obj);
 	if (err)
 		goto out_free;
 
@@ -1333,7 +1364,7 @@ static int igt_gtt_reserve(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			goto out;
@@ -1385,7 +1416,7 @@ static int igt_gtt_reserve(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			goto out;
@@ -1549,7 +1580,7 @@ static int igt_gtt_insert(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			goto out;
@@ -1658,7 +1689,7 @@ static int igt_gtt_insert(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			goto out;
@@ -1829,7 +1860,7 @@ static int igt_cs_tlb(void *arg)
 		goto out_vm;
 	}
 
-	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
+	batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		goto out_put_bbe;
@@ -1845,7 +1876,7 @@ static int igt_cs_tlb(void *arg)
 	}
 
 	/* Track the execution of each request by writing into different slot */
-	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
+	batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		goto out_put_act;
@@ -1892,7 +1923,7 @@ static int igt_cs_tlb(void *arg)
 		goto out_put_out;
 	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
 
-	result = i915_gem_object_pin_map(out, I915_MAP_WB);
+	result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
 	if (IS_ERR(result)) {
 		err = PTR_ERR(result);
 		goto out_put_out;
@@ -1908,6 +1939,7 @@ static int igt_cs_tlb(void *arg)
 	while (!__igt_timeout(end_time, NULL)) {
 		struct i915_vm_pt_stash stash = {};
 		struct i915_request *rq;
+		struct i915_gem_ww_ctx ww;
 		u64 offset;
 
 		offset = igt_random_offset(&prng,
@@ -1926,19 +1958,30 @@ static int igt_cs_tlb(void *arg)
 		if (err)
 			goto end;
 
+		i915_gem_ww_ctx_init(&ww, false);
+retry:
+		err = i915_vm_lock_objects(vm, &ww);
+		if (err)
+			goto end_ww;
+
 		err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
 		if (err)
-			goto end;
+			goto end_ww;
 
 		err = i915_vm_pin_pt_stash(vm, &stash);
-		if (err) {
-			i915_vm_free_pt_stash(vm, &stash);
-			goto end;
-		}
-
-		vm->allocate_va_range(vm, &stash, offset, chunk_size);
+		if (!err)
+			vm->allocate_va_range(vm, &stash, offset, chunk_size);
 
 		i915_vm_free_pt_stash(vm, &stash);
+end_ww:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
+		if (err)
+			goto end;
 
 		/* Prime the TLB with the dummy pages */
 		for (i = 0; i < count; i++) {
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index d2a678a2497e..ee8e753d98ce 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -609,6 +609,206 @@ static int live_nop_request(void *arg)
 	return err;
 }
 
+static int __cancel_inactive(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	int err = 0;
+
+	if (igt_spinner_init(&spin, engine->gt))
+		return -ENOMEM;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out_spin;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_ce;
+	}
+
+	pr_debug("%s: Cancelling inactive request\n", engine->name);
+	i915_request_cancel(rq, -EINTR);
+	i915_request_get(rq);
+	i915_request_add(rq);
+
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+		struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+		pr_err("%s: Failed to cancel inactive request\n", engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
+		err = -ETIME;
+		goto out_rq;
+	}
+
+	if (rq->fence.error != -EINTR) {
+		pr_err("%s: fence not cancelled (%u)\n",
+		       engine->name, rq->fence.error);
+		err = -EINVAL;
+	}
+
+out_rq:
+	i915_request_put(rq);
+out_ce:
+	intel_context_put(ce);
+out_spin:
+	igt_spinner_fini(&spin);
+	if (err)
+		pr_err("%s: %s error %d\n", __func__, engine->name, err);
+	return err;
+}
+
+static int __cancel_active(struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	int err = 0;
+
+	if (igt_spinner_init(&spin, engine->gt))
+		return -ENOMEM;
+
+	ce = intel_context_create(engine);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out_spin;
+	}
+
+	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_ce;
+	}
+
+	pr_debug("%s: Cancelling active request\n", engine->name);
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (!igt_wait_for_spinner(&spin, rq)) {
+		struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+		pr_err("Failed to start spinner on %s\n", engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
+		err = -ETIME;
+		goto out_rq;
+	}
+	i915_request_cancel(rq, -EINTR);
+
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+		struct drm_printer p = drm_info_printer(engine->i915->drm.dev);
+
+		pr_err("%s: Failed to cancel active request\n", engine->name);
+		intel_engine_dump(engine, &p, "%s\n", engine->name);
+		err = -ETIME;
+		goto out_rq;
+	}
+
+	if (rq->fence.error != -EINTR) {
+		pr_err("%s: fence not cancelled (%u)\n",
+		       engine->name, rq->fence.error);
+		err = -EINVAL;
+	}
+
+out_rq:
+	i915_request_put(rq);
+out_ce:
+	intel_context_put(ce);
+out_spin:
+	igt_spinner_fini(&spin);
+	if (err)
pr_err("%s: %s error %d\n", __func__, engine->name, err); + return err; +} + +static int __cancel_completed(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct igt_spinner spin; + struct i915_request *rq; + int err = 0; + + if (igt_spinner_init(&spin, engine->gt)) + return -ENOMEM; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out_spin; + } + + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_ce; + } + igt_spinner_end(&spin); + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -ETIME; + goto out_rq; + } + + pr_debug("%s: Cancelling completed request\n", engine->name); + i915_request_cancel(rq, -EINTR); + if (rq->fence.error) { + pr_err("%s: fence not cancelled (%u)\n", + engine->name, rq->fence.error); + err = -EINVAL; + } + +out_rq: + i915_request_put(rq); +out_ce: + intel_context_put(ce); +out_spin: + igt_spinner_fini(&spin); + if (err) + pr_err("%s: %s error %d\n", __func__, engine->name, err); + return err; +} + +static int live_cancel_request(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct intel_engine_cs *engine; + + /* + * Check cancellation of requests. We expect to be able to immediately + * cancel active requests, even if they are currently on the GPU. + */ + + for_each_uabi_engine(engine, i915) { + struct igt_live_test t; + int err, err2; + + if (!intel_engine_has_preemption(engine)) + continue; + + err = igt_live_test_begin(&t, i915, __func__, engine->name); + if (err) + return err; + + err = __cancel_inactive(engine); + if (err == 0) + err = __cancel_active(engine); + if (err == 0) + err = __cancel_completed(engine); + + err2 = igt_live_test_end(&t); + if (err) + return err; + if (err2) + return err2; + } + + return 0; +} + static struct i915_vma *empty_batch(struct drm_i915_private *i915) { struct drm_i915_gem_object *obj; @@ -620,7 +820,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) if (IS_ERR(obj)) return ERR_CAST(obj); - cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); + cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err; @@ -782,7 +982,7 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) if (err) goto err; - cmd = i915_gem_object_pin_map(obj, I915_MAP_WC); + cmd = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); goto err; @@ -817,7 +1017,7 @@ static int recursive_batch_resolve(struct i915_vma *batch) { u32 *cmd; - cmd = i915_gem_object_pin_map(batch->obj, I915_MAP_WC); + cmd = i915_gem_object_pin_map_unlocked(batch->obj, I915_MAP_WC); if (IS_ERR(cmd)) return PTR_ERR(cmd); @@ -1070,8 +1270,8 @@ out_request: if (!request[idx]) break; - cmd = i915_gem_object_pin_map(request[idx]->batch->obj, - I915_MAP_WC); + cmd = i915_gem_object_pin_map_unlocked(request[idx]->batch->obj, + I915_MAP_WC); if (!IS_ERR(cmd)) { *cmd = MI_BATCH_BUFFER_END; @@ -1486,6 +1686,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_sequential_engines), SUBTEST(live_parallel_engines), SUBTEST(live_empty_request), + SUBTEST(live_cancel_request), SUBTEST(live_breadcrumbs_smoketest), }; diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c index 1b6125e4c1ac..5fe7b80ca0bd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -361,7 +361,7 @@ static unsigned 
 				   unsigned int x,
 				   unsigned int y)
 {
-	return (r->plane[n].stride * (r->plane[n].height - y - 1) +
+	return (r->plane[n].src_stride * (r->plane[n].height - y - 1) +
 		r->plane[n].offset + x);
 }
 
@@ -373,6 +373,8 @@ assert_rotated(struct drm_i915_gem_object *obj,
 	unsigned int x, y;
 
 	for (x = 0; x < r->plane[n].width; x++) {
+		unsigned int left;
+
 		for (y = 0; y < r->plane[n].height; y++) {
 			unsigned long src_idx;
 			dma_addr_t src;
@@ -401,6 +403,31 @@ assert_rotated(struct drm_i915_gem_object *obj,
 
 			sg = sg_next(sg);
 		}
+
+		left = (r->plane[n].dst_stride - y) * PAGE_SIZE;
+
+		if (!left)
+			continue;
+
+		if (!sg) {
+			pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+			       n, x, y);
+			return ERR_PTR(-EINVAL);
+		}
+
+		if (sg_dma_len(sg) != left) {
+			pr_err("Invalid sg.length, found %d, expected %u for rotated page (%d, %d)\n",
+			       sg_dma_len(sg), left, x, y);
+			return ERR_PTR(-EINVAL);
+		}
+
+		if (sg_dma_address(sg) != 0) {
+			pr_err("Invalid address, found %pad, expected 0 for remapped page (%d, %d)\n",
+			       &sg_dma_address(sg), x, y);
+			return ERR_PTR(-EINVAL);
+		}
+
+		sg = sg_next(sg);
 	}
 
 	return sg;
@@ -411,7 +438,7 @@ static unsigned long remapped_index(const struct intel_remapped_info *r,
 				    unsigned int x,
 				    unsigned int y)
 {
-	return (r->plane[n].stride * y +
+	return (r->plane[n].src_stride * y +
 		r->plane[n].offset + x);
 }
 
@@ -462,15 +489,55 @@ assert_remapped(struct drm_i915_gem_object *obj,
 			if (!left)
 				sg = sg_next(sg);
 		}
+
+		if (left) {
+			pr_err("Unexpected sg tail with %d size for remapped page (%d, %d)\n",
+			       left,
+			       x, y);
+			return ERR_PTR(-EINVAL);
+		}
+
+		left = (r->plane[n].dst_stride - r->plane[n].width) * PAGE_SIZE;
+
+		if (!left)
+			continue;
+
+		if (!sg) {
+			pr_err("Invalid sg table: too short at plane %d, (%d, %d)!\n",
+			       n, x, y);
+			return ERR_PTR(-EINVAL);
+		}
+
+		if (sg_dma_len(sg) != left) {
+			pr_err("Invalid sg.length, found %u, expected %u for remapped page (%d, %d)\n",
+			       sg_dma_len(sg), left,
+			       x, y);
+			return ERR_PTR(-EINVAL);
+		}
+
+		if (sg_dma_address(sg) != 0) {
+			pr_err("Invalid address, found %pad, expected 0 for remapped page (%d, %d)\n",
+			       &sg_dma_address(sg),
+			       x, y);
+			return ERR_PTR(-EINVAL);
+		}
+
+		sg = sg_next(sg);
+		left = 0;
 	}
 
 	return sg;
 }
 
-static unsigned int rotated_size(const struct intel_remapped_plane_info *a,
-				 const struct intel_remapped_plane_info *b)
+static unsigned int remapped_size(enum i915_ggtt_view_type view_type,
+				  const struct intel_remapped_plane_info *a,
+				  const struct intel_remapped_plane_info *b)
 {
-	return a->width * a->height + b->width * b->height;
+
+	if (view_type == I915_GGTT_VIEW_ROTATED)
+		return a->dst_stride * a->width + b->dst_stride * b->width;
+	else
+		return a->dst_stride * a->height + b->dst_stride * b->height;
 }
 
 static int igt_vma_rotate_remap(void *arg)
@@ -479,21 +546,26 @@ static int igt_vma_rotate_remap(void *arg)
 	struct i915_ggtt *ggtt = arg;
 	struct i915_address_space *vm = &ggtt->vm;
 	struct drm_i915_gem_object *obj;
 	const struct intel_remapped_plane_info planes[] = {
-		{ .width = 1, .height = 1, .stride = 1 },
-		{ .width = 2, .height = 2, .stride = 2 },
-		{ .width = 4, .height = 4, .stride = 4 },
-		{ .width = 8, .height = 8, .stride = 8 },
+		{ .width = 1, .height = 1, .src_stride = 1 },
+		{ .width = 2, .height = 2, .src_stride = 2 },
+		{ .width = 4, .height = 4, .src_stride = 4 },
+		{ .width = 8, .height = 8, .src_stride = 8 },
+
+		{ .width = 3, .height = 5, .src_stride = 3 },
+		{ .width = 3, .height = 5, .src_stride = 4 },
+		{ .width = 3, .height = 5, .src_stride = 5 },
 
-		{ .width = 3, .height = 5,
 .stride = 3 },
-		{ .width = 3, .height = 5, .stride = 4 },
-		{ .width = 3, .height = 5, .stride = 5 },
+		{ .width = 5, .height = 3, .src_stride = 5 },
+		{ .width = 5, .height = 3, .src_stride = 7 },
+		{ .width = 5, .height = 3, .src_stride = 9 },
 
-		{ .width = 5, .height = 3, .stride = 5 },
-		{ .width = 5, .height = 3, .stride = 7 },
-		{ .width = 5, .height = 3, .stride = 9 },
+		{ .width = 4, .height = 6, .src_stride = 6 },
+		{ .width = 6, .height = 4, .src_stride = 6 },
+
+		{ .width = 2, .height = 2, .src_stride = 2, .dst_stride = 2 },
+		{ .width = 3, .height = 3, .src_stride = 3, .dst_stride = 4 },
+		{ .width = 5, .height = 6, .src_stride = 7, .dst_stride = 8 },
 
-		{ .width = 4, .height = 6, .stride = 6 },
-		{ .width = 6, .height = 4, .stride = 6 },
 		{ }
 	}, *a, *b;
 	enum i915_ggtt_view_type types[] = {
@@ -515,22 +587,33 @@ static int igt_vma_rotate_remap(void *arg)
 	for (t = types; *t; t++) {
 		for (a = planes; a->width; a++) {
 			for (b = planes + ARRAY_SIZE(planes); b-- != planes; ) {
-				struct i915_ggtt_view view;
+				struct i915_ggtt_view view = {
+					.type = *t,
+					.remapped.plane[0] = *a,
+					.remapped.plane[1] = *b,
+				};
+				struct intel_remapped_plane_info *plane_info = view.remapped.plane;
 				unsigned int n, max_offset;
 
-				max_offset = max(a->stride * a->height,
-						 b->stride * b->height);
+				max_offset = max(plane_info[0].src_stride * plane_info[0].height,
						 plane_info[1].src_stride * plane_info[1].height);
 				GEM_BUG_ON(max_offset > max_pages);
 				max_offset = max_pages - max_offset;
 
-				view.type = *t;
-				view.rotated.plane[0] = *a;
-				view.rotated.plane[1] = *b;
-
-				for_each_prime_number_from(view.rotated.plane[0].offset, 0, max_offset) {
-					for_each_prime_number_from(view.rotated.plane[1].offset, 0, max_offset) {
+				if (!plane_info[0].dst_stride)
+					plane_info[0].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+									 plane_info[0].height :
+									 plane_info[0].width;
+				if (!plane_info[1].dst_stride)
+					plane_info[1].dst_stride = view.type == I915_GGTT_VIEW_ROTATED ?
+									 plane_info[1].height :
+									 plane_info[1].width;
+
+				for_each_prime_number_from(plane_info[0].offset, 0, max_offset) {
+					for_each_prime_number_from(plane_info[1].offset, 0, max_offset) {
 						struct scatterlist *sg;
 						struct i915_vma *vma;
+						unsigned int expected_pages;
 
 						vma = checked_vma_instance(obj, vm, &view);
 						if (IS_ERR(vma)) {
@@ -544,25 +627,27 @@ static int igt_vma_rotate_remap(void *arg)
 							goto out_object;
 						}
 
+						expected_pages = remapped_size(view.type, &plane_info[0], &plane_info[1]);
+
 						if (view.type == I915_GGTT_VIEW_ROTATED &&
-						    vma->size != rotated_size(a, b) * PAGE_SIZE) {
+						    vma->size != expected_pages * PAGE_SIZE) {
 							pr_err("VMA is wrong size, expected %lu, found %llu\n",
-							       PAGE_SIZE * rotated_size(a, b), vma->size);
+							       PAGE_SIZE * expected_pages, vma->size);
 							err = -EINVAL;
 							goto out_object;
 						}
 
 						if (view.type == I915_GGTT_VIEW_REMAPPED &&
-						    vma->size > rotated_size(a, b) * PAGE_SIZE) {
+						    vma->size > expected_pages * PAGE_SIZE) {
 							pr_err("VMA is wrong size, expected %lu, found %llu\n",
-							       PAGE_SIZE * rotated_size(a, b), vma->size);
+							       PAGE_SIZE * expected_pages, vma->size);
 							err = -EINVAL;
 							goto out_object;
 						}
 
-						if (vma->pages->nents > rotated_size(a, b)) {
+						if (vma->pages->nents > expected_pages) {
 							pr_err("sg table is wrong sizeo, expected %u, found %u nents\n",
-							       rotated_size(a, b), vma->pages->nents);
+							       expected_pages, vma->pages->nents);
 							err = -EINVAL;
 							goto out_object;
 						}
@@ -587,17 +672,19 @@ static int igt_vma_rotate_remap(void *arg)
 						else
 							sg = assert_remapped(obj, &view.remapped, n, sg);
 						if (IS_ERR(sg)) {
-							pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d), (%d, %d, %d, %d)]\n",
+							pr_err("Inconsistent %s VMA pages for plane %d: [(%d, %d, %d, %d, %d), (%d, %d, %d, %d, %d)]\n",
 							       view.type == I915_GGTT_VIEW_ROTATED ?
 							       "rotated" : "remapped", n,
-							       view.rotated.plane[0].width,
-							       view.rotated.plane[0].height,
-							       view.rotated.plane[0].stride,
-							       view.rotated.plane[0].offset,
-							       view.rotated.plane[1].width,
-							       view.rotated.plane[1].height,
-							       view.rotated.plane[1].stride,
-							       view.rotated.plane[1].offset);
+							       plane_info[0].width,
+							       plane_info[0].height,
+							       plane_info[0].src_stride,
+							       plane_info[0].dst_stride,
+							       plane_info[0].offset,
+							       plane_info[1].width,
+							       plane_info[1].height,
+							       plane_info[1].src_stride,
+							       plane_info[1].dst_stride,
+							       plane_info[1].offset);
 							err = -EINVAL;
 							goto out_object;
 						}
@@ -849,21 +936,26 @@ static int igt_vma_remapped_gtt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	const struct intel_remapped_plane_info planes[] = {
-		{ .width = 1, .height = 1, .stride = 1 },
-		{ .width = 2, .height = 2, .stride = 2 },
-		{ .width = 4, .height = 4, .stride = 4 },
-		{ .width = 8, .height = 8, .stride = 8 },
+		{ .width = 1, .height = 1, .src_stride = 1 },
+		{ .width = 2, .height = 2, .src_stride = 2 },
+		{ .width = 4, .height = 4, .src_stride = 4 },
+		{ .width = 8, .height = 8, .src_stride = 8 },
 
-		{ .width = 3, .height = 5, .stride = 3 },
-		{ .width = 3, .height = 5, .stride = 4 },
-		{ .width = 3, .height = 5, .stride = 5 },
+		{ .width = 3, .height = 5, .src_stride = 3 },
+		{ .width = 3, .height = 5, .src_stride = 4 },
+		{ .width = 3, .height = 5, .src_stride = 5 },
 
-		{ .width = 5, .height = 3, .stride = 5 },
-		{ .width = 5, .height = 3, .stride = 7 },
-		{ .width = 5, .height = 3, .stride = 9 },
+		{ .width = 5, .height = 3, .src_stride = 5 },
+		{ .width = 5, .height = 3, .src_stride = 7 },
+		{ .width = 5, .height = 3, .src_stride = 9 },
+
+		{ .width = 4, .height = 6, .src_stride = 6 },
+		{ .width = 6, .height = 4, .src_stride = 6 },
+
+		{ .width = 2, .height = 2, .src_stride = 2, .dst_stride = 2 },
+		{ .width = 3, .height = 3, .src_stride = 3, .dst_stride = 4 },
+		{ .width = 5, .height = 6, .src_stride = 7, .dst_stride = 8 },
 
-		{ .width = 4, .height = 6, .stride = 6 },
-		{ .width = 6, .height = 4, .stride = 6 },
 		{ }
 	}, *p;
 	enum i915_ggtt_view_type types[] = {
@@ -887,10 +979,10 @@ static int igt_vma_remapped_gtt(void *arg)
 				.type = *t,
 				.rotated.plane[0] = *p,
 			};
+			struct intel_remapped_plane_info *plane_info = view.rotated.plane;
 			struct i915_vma *vma;
 			u32 __iomem *map;
 			unsigned int x, y;
-			int err;
 
 			i915_gem_object_lock(obj, NULL);
 			err = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -898,6 +990,10 @@ static int igt_vma_remapped_gtt(void *arg)
 			if (err)
 				goto out;
 
+			if (!plane_info[0].dst_stride)
+				plane_info[0].dst_stride = *t == I915_GGTT_VIEW_ROTATED ?
+								 p->height : p->width;
+
 			vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 			if (IS_ERR(vma)) {
 				err = PTR_ERR(vma);
 				goto out;
@@ -913,15 +1009,15 @@ static int igt_vma_remapped_gtt(void *arg)
 				goto out;
 			}
 
-			for (y = 0 ; y < p->height; y++) {
-				for (x = 0 ; x < p->width; x++) {
+			for (y = 0 ; y < plane_info[0].height; y++) {
+				for (x = 0 ; x < plane_info[0].width; x++) {
 					unsigned int offset;
 					u32 val = y << 16 | x;
 
 					if (*t == I915_GGTT_VIEW_ROTATED)
-						offset = (x * p->height + y) * PAGE_SIZE;
+						offset = (x * plane_info[0].dst_stride + y) * PAGE_SIZE;
 					else
-						offset = (y * p->width + x) * PAGE_SIZE;
+						offset = (y * plane_info[0].dst_stride + x) * PAGE_SIZE;
 
 					iowrite32(val, &map[offset / sizeof(*map)]);
 				}
@@ -944,8 +1040,8 @@ static int igt_vma_remapped_gtt(void *arg)
 				goto out;
 			}
 
-			for (y = 0 ; y < p->height; y++) {
-				for (x = 0 ; x < p->width; x++) {
+			for (y = 0 ; y < plane_info[0].height; y++) {
+				for (x = 0 ; x < plane_info[0].width; x++) {
 					unsigned int offset, src_idx;
 					u32 exp = y << 16 | x;
 					u32 val;
@@ -960,8 +1056,9 @@ static int igt_vma_remapped_gtt(void *arg)
 					if (val != exp) {
 						pr_err("%s VMA write test failed, expected 0x%x, found 0x%x\n",
"Rotated" : "Remapped", - val, exp); + exp, val); i915_vma_unpin_iomap(vma); + err = -EINVAL; goto out; } } diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 83f6e5f31fb3..cfbbe415b57c 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -12,8 +12,6 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) { - unsigned int mode; - void *vaddr; int err; memset(spin, 0, sizeof(*spin)); @@ -24,6 +22,7 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) err = PTR_ERR(spin->hws); goto err; } + i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC); spin->obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(spin->obj)) { @@ -31,34 +30,83 @@ int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt) goto err_hws; } - i915_gem_object_set_cache_coherency(spin->hws, I915_CACHE_LLC); - vaddr = i915_gem_object_pin_map(spin->hws, I915_MAP_WB); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_obj; - } - spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); - - mode = i915_coherent_map_type(gt->i915); - vaddr = i915_gem_object_pin_map(spin->obj, mode); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_unpin_hws; - } - spin->batch = vaddr; - return 0; -err_unpin_hws: - i915_gem_object_unpin_map(spin->hws); -err_obj: - i915_gem_object_put(spin->obj); err_hws: i915_gem_object_put(spin->hws); err: return err; } +static void *igt_spinner_pin_obj(struct intel_context *ce, + struct i915_gem_ww_ctx *ww, + struct drm_i915_gem_object *obj, + unsigned int mode, struct i915_vma **vma) +{ + void *vaddr; + int ret; + + *vma = i915_vma_instance(obj, ce->vm, NULL); + if (IS_ERR(*vma)) + return ERR_CAST(*vma); + + ret = i915_gem_object_lock(obj, ww); + if (ret) + return ERR_PTR(ret); + + vaddr = i915_gem_object_pin_map(obj, mode); + + if (!ww) + i915_gem_object_unlock(obj); + + if (IS_ERR(vaddr)) + return vaddr; + + if (ww) + ret = i915_vma_pin_ww(*vma, ww, 0, 0, PIN_USER); + else + ret = i915_vma_pin(*vma, 0, 0, PIN_USER); + + if (ret) { + i915_gem_object_unpin_map(obj); + return ERR_PTR(ret); + } + + return vaddr; +} + +int igt_spinner_pin(struct igt_spinner *spin, + struct intel_context *ce, + struct i915_gem_ww_ctx *ww) +{ + void *vaddr; + + if (spin->ce && WARN_ON(spin->ce != ce)) + return -ENODEV; + spin->ce = ce; + + if (!spin->seqno) { + vaddr = igt_spinner_pin_obj(ce, ww, spin->hws, I915_MAP_WB, &spin->hws_vma); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + spin->seqno = memset(vaddr, 0xff, PAGE_SIZE); + } + + if (!spin->batch) { + unsigned int mode = + i915_coherent_map_type(spin->gt->i915); + + vaddr = igt_spinner_pin_obj(ce, ww, spin->obj, mode, &spin->batch_vma); + if (IS_ERR(vaddr)) + return PTR_ERR(vaddr); + + spin->batch = vaddr; + } + + return 0; +} + static unsigned int seqno_offset(u64 fence) { return offset_in_page(sizeof(u32) * fence); @@ -103,27 +151,18 @@ igt_spinner_create_request(struct igt_spinner *spin, if (!intel_engine_can_store_dword(ce->engine)) return ERR_PTR(-ENODEV); - vma = i915_vma_instance(spin->obj, ce->vm, NULL); - if (IS_ERR(vma)) - return ERR_CAST(vma); - - hws = i915_vma_instance(spin->hws, ce->vm, NULL); - if (IS_ERR(hws)) - return ERR_CAST(hws); + if (!spin->batch) { + err = igt_spinner_pin(spin, ce, NULL); + if (err) + return ERR_PTR(err); + } - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return ERR_PTR(err); - - err = i915_vma_pin(hws, 0, 0, PIN_USER); - if (err) - goto 
+	hws = spin->hws_vma;
+	vma = spin->batch_vma;
 
 	rq = intel_context_create_request(ce);
-	if (IS_ERR(rq)) {
-		err = PTR_ERR(rq);
-		goto unpin_hws;
-	}
+	if (IS_ERR(rq))
+		return ERR_CAST(rq);
 
 	err = move_to_active(vma, rq, 0);
 	if (err)
@@ -186,10 +225,6 @@ cancel_rq:
 		i915_request_set_error_once(rq, err);
 		i915_request_add(rq);
 	}
-unpin_hws:
-	i915_vma_unpin(hws);
-unpin_vma:
-	i915_vma_unpin(vma);
 	return err ? ERR_PTR(err) : rq;
 }
 
@@ -203,6 +238,9 @@ hws_seqno(const struct igt_spinner *spin, const struct i915_request *rq)
 
 void igt_spinner_end(struct igt_spinner *spin)
 {
+	if (!spin->batch)
+		return;
+
 	*spin->batch = MI_BATCH_BUFFER_END;
 	intel_gt_chipset_flush(spin->gt);
 }
@@ -211,10 +249,16 @@ void igt_spinner_fini(struct igt_spinner *spin)
 {
 	igt_spinner_end(spin);
 
-	i915_gem_object_unpin_map(spin->obj);
+	if (spin->batch) {
+		i915_vma_unpin(spin->batch_vma);
+		i915_gem_object_unpin_map(spin->obj);
+	}
 	i915_gem_object_put(spin->obj);
 
-	i915_gem_object_unpin_map(spin->hws);
+	if (spin->seqno) {
+		i915_vma_unpin(spin->hws_vma);
+		i915_gem_object_unpin_map(spin->hws);
+	}
 	i915_gem_object_put(spin->hws);
 }
 
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.h b/drivers/gpu/drm/i915/selftests/igt_spinner.h
index ec62c9ef320b..fbe5b1625b05 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.h
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.h
@@ -20,11 +20,16 @@ struct igt_spinner {
 	struct intel_gt *gt;
 	struct drm_i915_gem_object *hws;
 	struct drm_i915_gem_object *obj;
+	struct intel_context *ce;
+	struct i915_vma *hws_vma, *batch_vma;
 	u32 *batch;
 	void *seqno;
 };
 
 int igt_spinner_init(struct igt_spinner *spin, struct intel_gt *gt);
+int igt_spinner_pin(struct igt_spinner *spin,
+		    struct intel_context *ce,
+		    struct i915_gem_ww_ctx *ww);
 void igt_spinner_fini(struct igt_spinner *spin);
 
 struct i915_request *
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index ce7adfa3bca0..a5fc0bf3feb9 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -31,10 +31,12 @@ static void close_objects(struct intel_memory_region *mem,
 	struct drm_i915_gem_object *obj, *on;
 
 	list_for_each_entry_safe(obj, on, objects, st_link) {
+		i915_gem_object_lock(obj, NULL);
 		if (i915_gem_object_has_pinned_pages(obj))
 			i915_gem_object_unpin_pages(obj);
 		/* No polluting the memory region between tests */
 		__i915_gem_object_put_pages(obj);
+		i915_gem_object_unlock(obj);
 		list_del(&obj->st_link);
 		i915_gem_object_put(obj);
 	}
@@ -69,7 +71,7 @@ static int igt_mock_fill(void *arg)
 			break;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			break;
@@ -109,7 +111,7 @@ igt_object_create(struct intel_memory_region *mem,
 	if (IS_ERR(obj))
 		return obj;
 
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_pin_pages_unlocked(obj);
 	if (err)
 		goto put;
 
@@ -123,8 +125,10 @@ put:
 
 static void igt_object_release(struct drm_i915_gem_object *obj)
 {
+	i915_gem_object_lock(obj, NULL);
 	i915_gem_object_unpin_pages(obj);
 	__i915_gem_object_put_pages(obj);
+	i915_gem_object_unlock(obj);
 	list_del(&obj->st_link);
 	i915_gem_object_put(obj);
 }
@@ -144,6 +148,82 @@ static bool is_contiguous(struct drm_i915_gem_object *obj)
 	return true;
 }
 
+static int igt_mock_reserve(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	resource_size_t avail = resource_size(&mem->region);
+	struct drm_i915_gem_object *obj;
+	const u32 chunk_size = SZ_32M;
+	u32 i, offset, count, *order;
+	u64 allocated, cur_avail;
+	I915_RND_STATE(prng);
+	LIST_HEAD(objects);
+	int err = 0;
+
+	if (!list_empty(&mem->reserved)) {
+		pr_err("%s region reserved list is not empty\n", __func__);
+		return -EINVAL;
+	}
+
+	count = avail / chunk_size;
+	order = i915_random_order(count, &prng);
+	if (!order)
+		return 0;
+
+	/* Reserve a bunch of ranges within the region */
+	for (i = 0; i < count; ++i) {
+		u64 start = order[i] * chunk_size;
+		u64 size = i915_prandom_u32_max_state(chunk_size, &prng);
+
+		/* Allow for some really big holes */
+		if (!size)
+			continue;
+
+		size = round_up(size, PAGE_SIZE);
+		offset = igt_random_offset(&prng, 0, chunk_size, size,
+					   PAGE_SIZE);
+
+		err = intel_memory_region_reserve(mem, start + offset, size);
+		if (err) {
+			pr_err("%s failed to reserve range", __func__);
+			goto out_close;
+		}
+
+		/* XXX: maybe sanity check the block range here? */
+		avail -= size;
+	}
+
+	/* Try to see if we can allocate from the remaining space */
+	allocated = 0;
+	cur_avail = avail;
+	do {
+		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
+
+		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
+		obj = igt_object_create(mem, &objects, size, 0);
+		if (IS_ERR(obj)) {
+			if (PTR_ERR(obj) == -ENXIO)
+				break;
+
+			err = PTR_ERR(obj);
+			goto out_close;
+		}
+		cur_avail -= size;
+		allocated += size;
+	} while (1);
+
+	if (allocated != avail) {
+		pr_err("%s mismatch between allocation and free space", __func__);
+		err = -EINVAL;
+	}
+
+out_close:
+	kfree(order);
+	close_objects(mem, &objects);
+	i915_buddy_free_list(&mem->mm, &mem->reserved);
+	return err;
+}
+
 static int igt_mock_contiguous(void *arg)
 {
 	struct intel_memory_region *mem = arg;
@@ -433,7 +513,7 @@ static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	if (err)
 		return err;
 
-	ptr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	ptr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(ptr))
 		return PTR_ERR(ptr);
 
@@ -538,7 +618,7 @@ static int igt_lmem_create(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_pin_pages_unlocked(obj);
 	if (err)
 		goto out_put;
 
@@ -577,7 +657,7 @@ static int igt_lmem_write_gpu(void *arg)
 		goto out_file;
 	}
 
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_pin_pages_unlocked(obj);
 	if (err)
 		goto out_put;
 
@@ -649,7 +729,7 @@ static int igt_lmem_write_cpu(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
 	if (IS_ERR(vaddr)) {
 		err = PTR_ERR(vaddr);
 		goto out_put;
@@ -753,7 +833,7 @@ create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
 		return obj;
 	}
 
-	addr = i915_gem_object_pin_map(obj, type);
+	addr = i915_gem_object_pin_map_unlocked(obj, type);
 	if (IS_ERR(addr)) {
 		i915_gem_object_put(obj);
 		if (PTR_ERR(addr) == -ENXIO)
@@ -930,6 +1010,7 @@ static int perf_memcpy(void *arg)
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_mock_reserve),
 		SUBTEST(igt_mock_fill),
 		SUBTEST(igt_mock_contiguous),
 		SUBTEST(igt_mock_splintered_region),
diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c
index 7270fc8ca801..5c7ae40bba63 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c
@@ -74,7 +74,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name)
 	ppgtt->vm.i915 = i915;
 	ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
 	ppgtt->vm.file = ERR_PTR(-ENODEV);
-	ppgtt->vm.dma = &i915->drm.pdev->dev;
+	ppgtt->vm.dma = i915->drm.dev;
 
 	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
 
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 3c6021415274..5d2d010a1e22 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -27,13 +27,13 @@ static int mock_object_init(struct intel_memory_region *mem,
 		return -E2BIG;
 
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
-	i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class);
+	i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class, flags);
 
 	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 
-	i915_gem_object_init_memory_region(obj, mem, flags);
+	i915_gem_object_init_memory_region(obj, mem);
 
 	return 0;
 }
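With the igt_spinner changes above, the hws and batch objects are no longer pinned at init time: igt_spinner_init() only allocates them, and pinning happens against a context, either explicitly via the new igt_spinner_pin() (optionally inside a caller's ww context) or lazily from igt_spinner_create_request(). A rough usage sketch based only on the signatures visible in this diff; the engine variable and error paths are assumed to exist in the caller:

	struct igt_spinner spin;
	struct intel_context *ce;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, engine->gt);	/* allocate hws + batch objects only */
	if (err)
		return err;

	ce = intel_context_create(engine);
	/* ... handle IS_ERR(ce) ... */

	/* pins lazily: create_request calls igt_spinner_pin(&spin, ce, NULL) if needed */
	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
	/* ... submit, wait, cancel, etc. ... */

	igt_spinner_end(&spin);	/* now safe even if the batch was never pinned */
	igt_spinner_fini(&spin);	/* unpins only what igt_spinner_pin() actually pinned */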