assert_object_held(obj);
cur = i915_gem_object_pin_map(obj, map_type); if (IS_ERR(cur)) return PTR_ERR(cur);
if (fill) for (i = 0; i < count; ++i)
*cur++ = i; else for (i = 0; i < count; ++i) if (*cur++ != i) {
pr_err("Object content mismatch at location %d of %d\n", i, count);
err = -EINVAL; break;
}
err = i915_gem_object_lock(obj, ww); if (err) return err;
if (vma) {
err = i915_vma_pin_ww(vma, ww, obj->base.size, 0,
0UL | PIN_OFFSET_FIXED |
PIN_USER); if (err) { if (err != -EINTR && err != ERESTARTSYS &&
err != -EDEADLK)
pr_err("Failed to pin vma.\n"); return err;
}
i915_vma_unpin(vma);
}
/* * Migration will implicitly unbind (asynchronously) any bound * vmas.
*/ if (i915_gem_object_is_lmem(obj)) {
err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM); if (err) { if (!silent_migrate)
pr_err("Object failed migration to smem\n"); if (err) return err;
}
if (i915_gem_object_is_lmem(obj)) {
pr_err("object still backed by lmem\n");
err = -EINVAL;
}
if (!i915_gem_object_has_struct_page(obj)) {
pr_err("object not backed by struct page\n");
err = -EINVAL;
}
} else {
err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM_0); if (err) { if (!silent_migrate)
pr_err("Object failed migration to lmem\n"); if (err) return err;
}
if (i915_gem_object_has_struct_page(obj)) {
pr_err("object still backed by struct page\n");
err = -EINVAL;
}
if (!i915_gem_object_is_lmem(obj)) {
pr_err("object not backed by lmem\n");
err = -EINVAL;
}
}
obj = i915_gem_object_create_lmem(i915, SZ_2M, 0); if (IS_ERR(obj)) return PTR_ERR(obj);
if (vm) {
vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) {
err = PTR_ERR(vma); goto out_put;
}
}
/* Initial GPU fill, sync, CPU initialization. */
for_i915_gem_ww(&ww, err, true) {
err = i915_gem_object_lock(obj, &ww); if (err) continue;
err = ____i915_gem_object_get_pages(obj); if (err) continue;
err = intel_migrate_clear(>->migrate, &ww, deps,
obj->mm.pages->sgl, obj->pat_index,
i915_gem_object_is_lmem(obj),
0xdeadbeaf, &rq); if (rq) {
err = dma_resv_reserve_fences(obj->base.resv, 1); if (!err)
dma_resv_add_fence(obj->base.resv, &rq->fence,
DMA_RESV_USAGE_KERNEL);
i915_request_put(rq);
} if (err) continue;
if (!vma) {
err = igt_fill_check_buffer(obj, gt, true); if (err) continue;
}
} if (err) goto out_put;
/* * Migrate to and from smem without explicitly syncing. * Finalize with data in smem for fast readout.
*/ for (i = 1; i <= 5; ++i) {
for_i915_gem_ww(&ww, err, true)
err = lmem_pages_migrate_one(&ww, obj, vma,
borked_migrate); if (err) goto out_put;
}
err = i915_gem_object_lock_interruptible(obj, NULL); if (err) goto out_put;
if (spin) { if (dma_fence_is_signaled(spin_fence)) {
pr_err("Spinner was terminated by hangcheck.\n");
err = -EBUSY; goto out_unlock;
}
igt_spinner_end(spin);
}
/* Finally sync migration and check content. */
err = i915_gem_object_wait_migration(obj, true); if (err) goto out_unlock;
if (vma) {
err = i915_vma_wait_for_bind(vma); if (err) goto out_unlock;
} else {
err = igt_fill_check_buffer(obj, gt, false);
}
/*
 * This subtest tests that unbinding at migration is indeed performed
 * async. We launch a spinner and a number of migrations depending on
 * that spinner to have terminated. Before each migration we bind a
 * vma, which should then be async unbound by the migration operation.
 * If we are able to schedule migrations without blocking while the
 * spinner is still running, those unbinds are indeed async and non-
 * blocking.
 *
 * Note that each async bind operation is awaiting the previous migration
 * due to the moving fence resulting from the migration.
 */
static int igt_async_migrate(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct i915_ppgtt *ppgtt;
	struct igt_spinner spin;
	int err;

	ppgtt = i915_ppgtt_create(gt, 0);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (igt_spinner_init(&spin, gt)) {
		err = -ENOMEM;
		goto out_spin;
	}

	/*
	 * NOTE(review): this chunk appears truncated -- the per-engine loop
	 * (for_each_engine(engine, gt, id)) and the local declarations of
	 * 'ce' and 'rq' are missing here; restore them from the original
	 * source. 'engine' is otherwise used uninitialized below.
	 */
	ce = intel_context_create(engine);
	if (IS_ERR(ce)) {
		err = PTR_ERR(ce);
		goto out_ce;
	}

	/*
	 * Use MI_NOOP, making the spinner non-preemptible. If there
	 * is a code path where we fail async operation due to the
	 * running spinner, we will block and fail to end the
	 * spinner resulting in a deadlock. But with a non-
	 * preemptible spinner, hangcheck will terminate the spinner
	 * for us, and we will later detect that and fail the test.
	 */
	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	intel_context_put(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_ce;
	}
/* * Setting ASYNC_FAIL_ALLOC to 2 will simulate memory allocation failure while * arming the migration error check and block async migration. This * will cause us to deadlock and hangcheck will terminate the spinner * causing the test to fail.
*/ #define ASYNC_FAIL_ALLOC 1 staticint igt_lmem_async_migrate(void *arg)
{ int fail_gpu, fail_alloc, ban_memcpy, ret; struct intel_gt *gt = arg;
/*
 * NOTE(review): stray non-source text (a German website disclaimer) was
 * appended here, most likely an artifact of the text extraction that
 * produced this file. Preserved in English translation:
 *
 * "The information on this website has been carefully compiled to the
 * best of our knowledge. However, no guarantee is given as to the
 * completeness, correctness, or quality of the information provided.
 * Note: the color syntax highlighting and the measurement are still
 * experimental."
 */