staticint igt_partial_tiling(void *arg)
{ constunsignedint nreal = 1 << 12; /* largest tile row x2 */ struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref; int tiling; int err;
if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) return 0;
/* We want to check the page mapping and fencing of a large object * mmapped through the GTT. The object we create is larger than can * possibly be mmaped as a whole, and so we must use partial GGTT vma. * We then check that a write through each partial GGTT vma ends up * in the right set of pages within the object, and with the expected * tiling, which we verify by manual swizzling.
*/
if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) /* * The swizzling pattern is actually unknown as it * varies based on physical address of each page. * See i915_gem_detect_bit_6_swizzle().
*/ break;
tile.tiling = tiling; switch (tiling) { case I915_TILING_X:
tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_x; break; case I915_TILING_Y:
tile.swizzle = to_gt(i915)->ggtt->bit_6_swizzle_y; break;
}
if (!i915_ggtt_has_aperture(to_gt(i915)->ggtt)) return 0;
/* * igt_partial_tiling() does an exhastive check of partial tiling * chunking, but will undoubtably run out of time. Here, we do a * randomised search and hope over many runs of 1s with different * seeds we will do a thorough check. * * Remember to look at the st_seed if we see a flip-flop in BAT!
*/
if (i915->gem_quirks & GEM_QUIRK_PIN_SWIZZLED_PAGES) return 0;
/* Trim the device mmap space to only a page */
mmap_offset_lock(i915);
loop = 1; /* PAGE_SIZE units */
list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) { struct drm_mm_node *resv;
err = drm_mm_reserve_node(mm, resv); if (err) {
pr_err("Failed to trim VMA manager, err=%d\n", err);
kfree(resv); goto out_park;
}
}
GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
mmap_offset_unlock(i915);
/* Just fits! */ if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
pr_err("Unable to insert object into single page hole\n");
err = -EINVAL; goto out;
}
/* Too large */ if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
err = -EINVAL; goto out;
}
/* Fill the hole, further allocation attempts should then fail */
obj = create_sys_or_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) {
err = PTR_ERR(obj);
pr_err("Unable to create object for reclaimed hole\n"); goto out;
}
err = __assign_mmap_offset(obj, default_mapping(i915), &offset, NULL); if (err) {
pr_err("Unable to insert object into reclaimed hole\n"); goto err_obj;
}
if (!assert_mmap_offset(i915, PAGE_SIZE, enospc)) {
pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
err = -EINVAL; goto err_obj;
}
i915_gem_object_put(obj);
/* Now fill with busy dead objects that we expect to reap */ for (loop = 0; loop < 3; loop++) { if (intel_gt_is_wedged(to_gt(i915))) break;
if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
obj->mm.region->name);
err = -EINVAL;
}
i915_vma_unpin_iomap(vma);
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(vaddr)) return PTR_ERR(vaddr);
if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
pr_err("%s: Write via mmap did not land in backing store (WC)\n",
obj->mm.region->name);
err = -EINVAL;
}
i915_gem_object_unpin_map(obj);
mmap_read_lock(current->mm);
area = vma_lookup(current->mm, addr);
mmap_read_unlock(current->mm); if (!area) {
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
obj->mm.region->name);
err = -EINVAL; goto out_unmap;
}
for (i = 0; i < obj->base.size / sizeof(u32); i++) {
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
u32 x;
if (get_user(x, ux)) {
pr_err("%s: Unable to read from mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x));
err = -EFAULT; goto out_unmap;
}
if (x != expand32(POISON_INUSE)) {
pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
obj->mm.region->name,
i * sizeof(x), x, expand32(POISON_INUSE));
err = -EINVAL; goto out_unmap;
}
x = expand32(POISON_FREE); if (put_user(x, ux)) {
pr_err("%s: Unable to write to mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x));
err = -EFAULT; goto out_unmap;
}
}
if (type == I915_MMAP_TYPE_GTT)
intel_gt_flush_ggtt_writes(to_gt(i915));
list_for_each_entry_safe(obj, on, objects, st_link) {
i915_gem_object_lock(obj, NULL); if (i915_gem_object_has_pinned_pages(obj))
i915_gem_object_unpin_pages(obj); /* No polluting the memory region between tests */
__i915_gem_object_put_pages(obj);
i915_gem_object_unlock(obj);
list_del(&obj->st_link);
i915_gem_object_put(obj);
}
mmap_read_lock(current->mm);
area = vma_lookup(current->mm, addr);
mmap_read_unlock(current->mm); if (!area) {
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
obj->mm.region->name);
err = -EINVAL; goto out_unmap;
}
for (i = 0; i < obj->base.size / sizeof(u32); i++) {
u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));
u32 x;
if (get_user(x, ux)) {
err = -EFAULT; if (!unfaultable) {
pr_err("%s: Unable to read from mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x)); goto out_unmap;
}
if (x != expand32(POISON_INUSE)) {
pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
obj->mm.region->name,
i * sizeof(x), x, expand32(POISON_INUSE));
err = -EINVAL; goto out_unmap;
}
x = expand32(POISON_FREE); if (put_user(x, ux)) {
pr_err("%s: Unable to write to mmap, offset:%zd\n",
obj->mm.region->name, i * sizeof(x));
err = -EFAULT; goto out_unmap;
}
}
obj = __i915_gem_object_create_user(i915, PAGE_SIZE,
placements,
n_placements); if (IS_ERR(obj)) return PTR_ERR(obj);
if (flags & IGT_MMAP_MIGRATE_TOPDOWN)
obj->flags |= I915_BO_ALLOC_GPU_ONLY;
err = __assign_mmap_offset(obj, I915_MMAP_TYPE_FIXED, &offset, NULL); if (err) goto out_put;
/* * This will eventually create a GEM context, due to opening dummy drm * file, which needs a tiny amount of mappable device memory for the top * level paging structures(and perhaps scratch), so make sure we * allocate early, to avoid tears.
*/
addr = igt_mmap_offset(i915, offset, obj->base.size,
PROT_WRITE, MAP_SHARED); if (IS_ERR_VALUE(addr)) {
err = addr; goto out_put;
}
if (flags & IGT_MMAP_MIGRATE_FILL) {
err = igt_fill_mappable(placements[0], &objects); if (err) goto out_put;
}
err = i915_gem_object_lock(obj, NULL); if (err) goto out_put;
err = i915_gem_object_pin_pages(obj); if (err) {
i915_gem_object_unlock(obj); goto out_put;
}
err = intel_context_migrate_clear(to_gt(i915)->migrate.context, NULL,
obj->mm.pages->sgl, obj->pat_index,
i915_gem_object_is_lmem(obj),
expand32(POISON_INUSE), &rq);
i915_gem_object_unpin_pages(obj); if (rq) {
err = dma_resv_reserve_fences(obj->base.resv, 1); if (!err)
dma_resv_add_fence(obj->base.resv, &rq->fence,
DMA_RESV_USAGE_KERNEL);
i915_request_put(rq);
}
i915_gem_object_unlock(obj); if (err) goto out_put;
if (flags & IGT_MMAP_MIGRATE_EVICTABLE)
igt_make_evictable(&objects);
if (flags & IGT_MMAP_MIGRATE_FAIL_GPU) {
err = i915_gem_object_lock(obj, NULL); if (err) goto out_put;
/* * Ensure we only simulate the gpu failuire when faulting the * pages.
*/
err = i915_gem_object_wait_moving_fence(obj, true);
i915_gem_object_unlock(obj); if (err) goto out_put;
i915_ttm_migrate_set_failure_modes(true, false);
}
/* * For testing purposes let's force small BAR, if not already * present.
*/
saved_io = mr->io; if (resource_size(&mr->io) == mr->total) {
resource_size_t io_size = resource_size(&mr->io);
io_size = rounddown_pow_of_two(io_size >> 1); if (io_size < PAGE_SIZE) continue;
/* * Allocate in the mappable portion, should be no surprises here.
*/
err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), mr, 0); if (err) goto out_io_size;
/* * Allocate in the non-mappable portion, but force migrating to * the mappable portion on fault (LMEM -> LMEM)
*/
err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
IGT_MMAP_MIGRATE_TOPDOWN |
IGT_MMAP_MIGRATE_FILL |
IGT_MMAP_MIGRATE_EVICTABLE); if (err) goto out_io_size;
/* * Allocate in the non-mappable portion, but force spilling into * system memory on fault (LMEM -> SMEM)
*/
err = __igt_mmap_migrate(mixed, ARRAY_SIZE(mixed), system,
IGT_MMAP_MIGRATE_TOPDOWN |
IGT_MMAP_MIGRATE_FILL); if (err) goto out_io_size;
/* * Allocate in the non-mappable portion, but since the mappable * portion is already full, and we can't spill to system memory, * then we should expect the fault to fail.
*/
err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
IGT_MMAP_MIGRATE_TOPDOWN |
IGT_MMAP_MIGRATE_FILL |
IGT_MMAP_MIGRATE_UNFAULTABLE); if (err) goto out_io_size;
/* * Allocate in the non-mappable portion, but force migrating to * the mappable portion on fault (LMEM -> LMEM). We then also * simulate a gpu error when moving the pages when faulting the * pages, which should result in wedging the gpu and returning * SIGBUS in the fault handler, since we can't fallback to * memcpy.
*/
err = __igt_mmap_migrate(single, ARRAY_SIZE(single), mr,
IGT_MMAP_MIGRATE_TOPDOWN |
IGT_MMAP_MIGRATE_FILL |
IGT_MMAP_MIGRATE_EVICTABLE |
IGT_MMAP_MIGRATE_FAIL_GPU |
IGT_MMAP_MIGRATE_UNFAULTABLE);
out_io_size:
mr->io = saved_io;
i915_ttm_buddy_man_force_visible_size(man,
resource_size(&mr->io) >> PAGE_SHIFT); if (err) return err;
}
return 0;
}
/*
 * repr_mmap_type - human-readable name for an i915 mmap type
 * @type: the mmap type to describe
 *
 * Returns a pointer to a static string suitable for logging/pr_err;
 * "unknown" for any unrecognised value.
 */
static const char *repr_mmap_type(enum i915_mmap_type type)
{
	switch (type) {
	case I915_MMAP_TYPE_GTT:
		return "gtt";
	case I915_MMAP_TYPE_WB:
		return "wb";
	case I915_MMAP_TYPE_WC:
		return "wc";
	case I915_MMAP_TYPE_UC:
		return "uc";
	case I915_MMAP_TYPE_FIXED:
		return "fixed";
	default:
		return "unknown";
	}
}
err = __get_user(y, ptr); if (err) {
pr_err("%s(%s): failed to read from user mmap\n",
obj->mm.region->name, repr_mmap_type(type)); goto out_unmap;
}
if (x != A || y != B) {
pr_err("%s(%s): failed to read/write values, found (%lx, %lx)\n",
obj->mm.region->name, repr_mmap_type(type),
x, y);
err = -EINVAL; goto out_unmap;
}
/* * Verify that the mmap access into the backing store aligns with * that of the GPU, i.e. that mmap is indeed writing into the same * page as being read by the GPU.
*/
if (!can_mmap(obj, type)) return 0;
err = wc_set(obj); if (err == -ENXIO)
err = gtt_set(obj); if (err) return err;
err = __assign_mmap_offset(obj, type, &offset, NULL); if (err) return err;
err = prefault_range(addr, obj->base.size); if (err) goto out_unmap;
err = check_present(addr, obj->base.size); if (err) {
pr_err("%s: was not present\n", obj->mm.region->name); goto out_unmap;
}
/* * After unbinding the object from the GGTT, its address may be reused * for other objects. Ergo we have to revoke the previous mmap PTE * access as it no longer points to the same object.
*/
i915_gem_object_lock(obj, NULL);
err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
i915_gem_object_unlock(obj); if (err) {
pr_err("Failed to unbind object!\n"); goto out_unmap;
}
if (type != I915_MMAP_TYPE_GTT) {
i915_gem_object_lock(obj, NULL);
__i915_gem_object_put_pages(obj);
i915_gem_object_unlock(obj); if (i915_gem_object_has_pages(obj)) {
pr_err("Failed to put-pages object!\n");
err = -EINVAL; goto out_unmap;
}
}
err = check_absent(addr, obj->base.size); if (err) {
pr_err("%s: was not absent\n", obj->mm.region->name); goto out_unmap;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.