/* Move bo to VRAM if not already there. */
ret = xe_bo_validate(bo, NULL, false);
if (ret) {
	KUNIT_FAIL(test, "Failed to validate bo.\n");
	return ret;
}

/* Optionally clear bo *and* CCS data in VRAM. */
if (clear) {
	fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource,
				 XE_MIGRATE_CLEAR_FLAG_FULL);
	if (IS_ERR(fence)) {
		KUNIT_FAIL(test, "Failed to submit bo clear.\n");
		return PTR_ERR(fence);
	}

	if (dma_fence_wait_timeout(fence, false, 5 * HZ) <= 0) {
		dma_fence_put(fence);
		KUNIT_FAIL(test, "Timeout while clearing bo.\n");
		return -ETIME;
	}

	dma_fence_put(fence);
}

/* Evict to system. CCS data should be copied. */
ret = xe_bo_evict(bo);
if (ret) {
	KUNIT_FAIL(test, "Failed to evict bo.\n");
	return ret;
}

/* Sync all migration blits */
timeout = dma_resv_wait_timeout(bo->ttm.base.resv,
				DMA_RESV_USAGE_KERNEL, true,
				5 * HZ);
if (timeout <= 0) {
	KUNIT_FAIL(test, "Failed to sync bo eviction.\n");
	return -ETIME;
}

/*
 * Bo with CCS data is now in system memory. Verify backing store
 * and data integrity. Then assign for the next testing round while
 * we still have a CPU map.
 */
ttm = bo->ttm.ttm;
if (!ttm || !ttm_tt_is_populated(ttm)) {
	KUNIT_FAIL(test, "Bo was not in expected placement.\n");
	return -EINVAL;
}

ccs_page = xe_bo_ccs_pages_start(bo) >> PAGE_SHIFT;
if (ccs_page >= ttm->num_pages) {
	KUNIT_FAIL(test, "No TTM CCS pages present.\n");
	return -EINVAL;
}

/* Check first CCS value */
if (cpu_map[0] != get_val) {
	KUNIT_FAIL(test,
		   "Expected CCS readout 0x%016llx, got 0x%016llx.\n",
		   (unsigned long long)get_val,
		   (unsigned long long)cpu_map[0]);
	ret = -EINVAL;
}

/* Check last CCS value, or at least last value in page. */
offset = xe_device_ccs_bytes(tile_to_xe(tile), xe_bo_size(bo));
offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
if (cpu_map[offset] != get_val) {
	KUNIT_FAIL(test,
		   "Expected CCS readout 0x%016llx, got 0x%016llx.\n",
		   (unsigned long long)get_val,
		   (unsigned long long)cpu_map[offset]);
	ret = -EINVAL;
}
if (IS_DGFX(xe))
	kunit_info(test, "Testing vram id %u\n", tile->id);
else
	kunit_info(test, "Testing system memory\n");

bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC,
		       bo_flags);
if (IS_ERR(bo)) {
	KUNIT_FAIL(test, "Failed to create bo.\n");
	return;
}

xe_bo_lock(bo, false);

/* A freshly created bo must read back as all-zero CCS data. */
kunit_info(test, "Verifying that CCS data is cleared on creation.\n");
ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL,
		       test);
if (ret)
	goto out_unlock;

/* The value written in the previous round must survive migration. */
kunit_info(test, "Verifying that CCS data survives migration.\n");
ret = ccs_test_migrate(tile, bo, false, 0xdeadbeefdeadbeefULL,
		       0xdeadbeefdeadbeefULL, test);
if (ret)
	goto out_unlock;

/* An explicit clear must zero the CCS data again. */
kunit_info(test, "Verifying that CCS data can be properly cleared.\n");
ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test);
for_each_gt(__gt, xe, id)
	xe_gt_sanitize(__gt);

err = xe_bo_restore_early(xe);

/*
 * Snapshotting the CTB and copying back a potentially old
 * version seems risky, depending on what might have been
 * inflight. Also it seems snapshotting the ADS object and
 * copying back results in serious breakage. Normally when
 * calling xe_bo_restore_kernel() we always fully restart the
 * GT, which re-initializes such things. We could potentially
 * skip saving and restoring such objects in xe_bo_evict_all()
 * however seems quite fragile not to also restart the GT. Try
 * to do that here by triggering a GT reset.
 */
for_each_gt(__gt, xe, id)
	xe_gt_reset(__gt);
if (ret) {
	KUNIT_FAIL(test, "Error mapping bo %u for content check.\n", bo_nr);
	return true;
}

/*
 * Re-run the PRNG over the mapped bo and compare every 32-bit word
 * against the freshly generated stream; a mismatch means content was
 * lost across the shrink/swap cycle.
 */
for (i = 0; i < bo->ttm.base.size; i += sizeof(u32)) {
	u32 val = prandom_u32_state(state);

	if (iosys_map_rd(&map, i, u32) != val) {
		KUNIT_FAIL(test, "Content not preserved, bo %u offset 0x%016llx",
			   bo_nr, (unsigned long long)i);
		kunit_info(test, "Failed value is 0x%08x, recorded 0x%08x\n",
			   (unsigned int)iosys_map_rd(&map, i, u32), val);
		/* First word already wrong: the PRNG stream itself drifted. */
		if (i == 0 && val != link->val)
			kunit_info(test, "Looks like PRNG is out of sync.\n");
		failed = true;
		break;
	}
}

ttm_bo_vunmap(&bo->ttm, &map);

return failed;
}
/* * Try to create system bos corresponding to twice the amount * of available system memory to test shrinker functionality. * If no swap space is available to accommodate the * memory overcommit, mark bos purgeable.
*/ staticint shrink_test_run_device(struct xe_device *xe)
{ struct kunit *test = kunit_get_current_test();
LIST_HEAD(bos); struct xe_bo_link *link, *next; struct sysinfo si;
u64 ram, ram_and_swap, purgeable = 0, alloced, to_alloc, limit; unsignedint interrupted = 0, successful = 0, count = 0; struct rnd_state prng;
u64 rand_seed; bool failed = false;
kunit_info(test, "Free ram is %lu bytes. Will allocate twice of that.\n",
(unsignedlong)ram); for (alloced = 0; alloced < to_alloc; alloced += XE_BO_SHRINK_SIZE) { struct xe_bo *bo; unsignedint mem_type; struct xe_ttm_tt *xe_tt;
link = kzalloc(sizeof(*link), GFP_KERNEL); if (!link) {
KUNIT_FAIL(test, "Unexpected link allocation failure\n");
failed = true; break;
}
INIT_LIST_HEAD(&link->link);
/* We can create bos using WC caching here. But it is slower. */
bo = xe_bo_create_user(xe, NULL, NULL, XE_BO_SHRINK_SIZE,
DRM_XE_GEM_CPU_CACHING_WB,
XE_BO_FLAG_SYSTEM); if (IS_ERR(bo)) { if (bo != ERR_PTR(-ENOMEM) && bo != ERR_PTR(-ENOSPC) &&
bo != ERR_PTR(-EINTR) && bo != ERR_PTR(-ERESTARTSYS))
KUNIT_FAIL(test, "Error creating bo: %pe\n", bo);
kfree(link);
failed = true; break;
}
xe_bo_lock(bo, false);
xe_tt = container_of(bo->ttm.ttm, typeof(*xe_tt), ttm);
/* * Allocate purgeable bos first, because if we do it the * other way around, they may not be subject to swapping...
*/ if (alloced < purgeable) {
xe_ttm_tt_account_subtract(xe, &xe_tt->ttm);
xe_tt->purgeable = true;
xe_ttm_tt_account_add(xe, &xe_tt->ttm);
bo->ttm.priority = 0;
spin_lock(&bo->ttm.bdev->lru_lock);
ttm_bo_move_to_lru_tail(&bo->ttm);
spin_unlock(&bo->ttm.bdev->lru_lock);
} else { int ret = shrink_test_fill_random(bo, &prng, link);
if (ret) {
xe_bo_unlock(bo);
xe_bo_put(bo);
KUNIT_FAIL(test, "Error filling bo with random data: %pe\n",
ERR_PTR(ret));
kfree(link);
failed = true; break;
}
}
/*
 * NOTE(review): the following is extraneous website-disclaimer boilerplate
 * (originally in German) appended by the extraction tool — it is not part of
 * the driver source and should be removed. Translation kept for reference:
 * "The information on this website was carefully compiled to the best of our
 * knowledge. However, neither completeness, nor correctness, nor quality of
 * the provided information is guaranteed. Remark: the colored syntax display
 * and the measurement are still experimental."
 */