va = ioremap_wc(mem->io.start + offset, PAGE_SIZE); if (!va) {
dev_err(mem->i915->drm.dev, "Failed to ioremap memory region [%pa + %pa] for %ps\n",
&mem->io.start, &offset, caller); return -EFAULT;
}
for (i = 0; i < ARRAY_SIZE(val); i++) {
err = __iopagetest(mem, va, PAGE_SIZE, val[i], offset, caller); if (err) break;
err = __iopagetest(mem, va, PAGE_SIZE, ~val[i], offset, caller); if (err) break;
}
iounmap(va); return err;
}
static resource_size_t random_page(resource_size_t last)
{
	resource_size_t pages = last >> PAGE_SHIFT;

	/* get_random_u32_below() requires a non-zero bound */
	if (!pages)
		return 0;

	/*
	 * Limited to the low 44b (16TiB), but should suffice for a spot
	 * check. Widen before shifting: get_random_u32_below() returns u32,
	 * and a u32 << PAGE_SHIFT would truncate the byte offset to 32 bits
	 * for regions larger than 4GiB.
	 */
	return (resource_size_t)get_random_u32_below(pages) << PAGE_SHIFT;
}
static int iomemtest(struct intel_memory_region *mem,
		     bool test_all,
		     const void *caller)
{
	resource_size_t last, page;
	int err;

	/* Nothing to test if the region is smaller than a single page. */
	if (resource_size(&mem->io) < PAGE_SIZE)
		return 0;

	last = resource_size(&mem->io) - PAGE_SIZE;

	/*
	 * Quick test to check read/write access to the iomap (backing store).
	 *
	 * Write a byte, read it back. If the iomapping fails, we expect
	 * a GPF preventing further execution. If the backing store does not
	 * exist, the read back will return garbage. We check a couple of pages,
	 * the first and last of the specified region to confirm the backing
	 * store + iomap does cover the entire memory region; and we check
	 * a random offset within as a quick spot check for bad memory.
	 */
	if (test_all) {
		for (page = 0; page <= last; page += PAGE_SIZE) {
			err = iopagetest(mem, page, caller);
			if (err)
				return err;
		}
	} else {
		err = iopagetest(mem, 0, caller);
		if (err)
			return err;

		err = iopagetest(mem, last, caller);
		if (err)
			return err;

		err = iopagetest(mem, random_page(last), caller);
		if (err)
			return err;
	}

	return 0;
}
/* XXX: consider maybe converting to an rb tree at some point */
for_each_memory_region(mr, i915, id) { if (mr->type == class && mr->instance == instance) return mr;
}
for_each_memory_region(mr, i915, id) if (mr->type == mem_type) return mr;
return NULL;
}
bool intel_memory_type_is_local(enum intel_memory_type mem_type)
{ switch (mem_type) { case INTEL_MEMORY_LOCAL: case INTEL_MEMORY_STOLEN_LOCAL: returntrue; default: returnfalse;
}
}
/** * intel_memory_region_reserve - Reserve a memory range * @mem: The region for which we want to reserve a range. * @offset: Start of the range to reserve. * @size: The size of the range to reserve. * * Return: 0 on success, negative error code on failure.
*/ int intel_memory_region_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size)
{ struct ttm_resource_manager *man = mem->region_private;
/* Tear down a region: driver release hook first, then common teardown. */
void intel_memory_region_destroy(struct intel_memory_region *mem)
{
	int err = 0;

	if (mem->ops->release)
		err = mem->ops->release(mem);

	/* Every object should have been released before the region dies. */
	GEM_WARN_ON(!list_empty_careful(&mem->objects.list));
	mutex_destroy(&mem->objects.lock);

	/* Keep the region struct alive if the release hook reported failure. */
	if (err == 0)
		kfree(mem);
}
/* Global memory region registration -- only slight layer inversions! */
/*
 * Probe and instantiate every memory region advertised by the platform.
 * On any fatal setup error, all regions created so far are released.
 *
 * NOTE(review): the tail of this function (debug print loop and the
 * out_cleanup error path) was replaced by extraction junk in the mangled
 * source; reconstructed below — confirm against the upstream file.
 */
int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, i))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			/* Discrete GPUs manage system memory through TTM. */
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		if (mem) { /* Skip on non-fatal errors */
			mem->id = i;
			i915->mm.regions[i] = mem;
		}
	}

	/* Summarize the probed regions for debugging. */
	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = i915->mm.regions[i];
		u64 region_size, io_size;

		if (!mem)
			continue;

		region_size = resource_size(&mem->region) >> 20;
		io_size = resource_size(&mem->io) >> 20;

		if (resource_size(&mem->io))
			drm_dbg(&i915->drm,
				"Memory region(%d): %s: %llu MiB %pR, io: %llu MiB %pR\n",
				mem->id, mem->name, region_size, &mem->region,
				io_size, &mem->io);
		else
			drm_dbg(&i915->drm,
				"Memory region(%d): %s: %llu MiB %pR, io: n/a\n",
				mem->id, mem->name, region_size, &mem->region);
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}