/*
 * NOTE(review): this span appears to interleave fragments of at least two
 * routines (single-placement setup, ioctl handle creation, and the
 * object-allocation path of __i915_gem_object_create_user_ext); statement
 * order looks scrambled relative to a coherent body -- e.g. a handle is
 * created and the object reference dropped before the object is allocated.
 * Code is left byte-identical pending reconstruction against upstream.
 *
 * For the common case of one memory region, skip storing an
 * allocated array and just point at the region directly.
 */ if (n_placements == 1) { struct intel_memory_region *mr = placements[0]; struct drm_i915_private *i915 = mr->i915;
/* NOTE(review): once the put below runs, the handle holds the only
 * reference; obj must not be touched afterwards on this path. */
ret = drm_gem_handle_create(file, &obj->base, handle_p); /* drop reference from allocate - handle holds it now */
i915_gem_object_put(obj); if (ret) return ret;
/* For most of the ABI (e.g. mmap) we think in system pages */
GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
/* Reject sizes the object/backing store cannot represent. */
if (i915_gem_object_size_2big(size)) return ERR_PTR(-E2BIG);
obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM);
/* Record the caller-validated placement list on the object. */
ret = object_set_placements(obj, placements, n_placements); if (ret) goto object_free;
/* * I915_BO_ALLOC_USER will make sure the object is cleared before * any user access.
 */
flags = I915_BO_ALLOC_USER;
/* Back the object with region mr; presumably 0 here means default page
 * alignment -- TODO confirm init_object's parameter meaning. */
ret = mr->ops->init_object(mr, obj, I915_BO_INVALID_OFFSET, size, 0, flags); if (ret) goto object_free;
GEM_BUG_ON(size != obj->base.size);
/* Add any flag set by create_ext options */
obj->flags |= ext_flags;
trace_i915_gem_object_create(obj); return obj;
/* Error path: free a heap-allocated placement array (only allocated when
 * more than one placement was stored), then the object itself. */
object_free: if (obj->mm.n_placements > 1)
kfree(obj->mm.placements);
i915_gem_object_free(obj); return ERR_PTR(ret);
}
/** * __i915_gem_object_create_user - Creates a new object using the same path as * DRM_I915_GEM_CREATE_EXT * @i915: i915 private * @size: size of the buffer, in bytes * @placements: possible placement regions, in priority order * @n_placements: number of possible placement regions * * This function is exposed primarily for selftests and does very little * error checking. It is assumed that the set of placement regions has * already been verified to be valid.
*/ struct drm_i915_gem_object *
__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size, struct intel_memory_region **placements, unsignedint n_placements)
{ return __i915_gem_object_create_user_ext(i915, size, placements,
n_placements, 0);
}
/*
 * NOTE(review): fragment of a dumb-buffer create path; the enclosing
 * function signature and the declarations of cpp/format/args/dev are not
 * visible in this chunk. Maps bytes-per-pixel to a DRM fourcc format and
 * then derives the buffer pitch.
 */
switch (cpp) { case 1:
format = DRM_FORMAT_C8; break; case 2:
format = DRM_FORMAT_RGB565; break; case 4:
format = DRM_FORMAT_XRGB8888; break; default: return -EINVAL;
}
/* have to work out size/pitch and return them */
args->pitch = ALIGN(args->width * cpp, 64);
/* align stride to page size so that we can remap */ if (args->pitch > intel_plane_fb_max_stride(dev, format,
DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
/*
 * set_placements - handle the I915_GEM_CREATE_EXT_MEMORY_REGIONS extension
 * @args: userspace list of (memory_class, memory_instance) regions
 * @ext_data: per-ioctl state accumulating the validated placement list
 *
 * Validates the user-supplied region list (zero padding, non-empty, not
 * over-long, every region known and non-private, no duplicates, extension
 * supplied at most once) and copies it into @ext_data in priority order.
 *
 * Returns: 0 on success, -EINVAL on invalid input, -EFAULT on a failed
 * copy from userspace.
 */
static int set_placements(struct drm_i915_gem_create_ext_memory_regions *args,
			  struct create_ext *ext_data)
{
	struct drm_i915_private *i915 = ext_data->i915;
	struct drm_i915_gem_memory_class_instance __user *uregions =
		u64_to_user_ptr(args->regions);
	struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
	u32 mask;
	int i, ret = 0;

	/* Accumulate all basic-shape errors before bailing out below. */
	if (args->pad) {
		drm_dbg(&i915->drm, "pad should be zero\n");
		ret = -EINVAL;
	}

	if (!args->num_regions) {
		drm_dbg(&i915->drm, "num_regions is zero\n");
		ret = -EINVAL;
	}

	BUILD_BUG_ON(ARRAY_SIZE(i915->mm.regions) != ARRAY_SIZE(placements));
	BUILD_BUG_ON(ARRAY_SIZE(ext_data->placements) != ARRAY_SIZE(placements));
	if (args->num_regions > ARRAY_SIZE(i915->mm.regions)) {
		drm_dbg(&i915->drm, "num_regions is too large\n");
		ret = -EINVAL;
	}

	if (ret)
		return ret;

	mask = 0;
	for (i = 0; i < args->num_regions; i++) {
		struct drm_i915_gem_memory_class_instance region;
		struct intel_memory_region *mr;

		/* was "copy_from_user(®ion, ...)" -- mis-encoded &region */
		if (copy_from_user(&region, uregions, sizeof(region)))
			return -EFAULT;

		mr = intel_memory_region_lookup(i915,
						region.memory_class,
						region.memory_instance);
		if (!mr || mr->private) {
			drm_dbg(&i915->drm, "Device is missing region { class: %d, inst: %d } at index = %d\n",
				region.memory_class, region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		if (mask & BIT(mr->id)) {
			drm_dbg(&i915->drm, "Found duplicate placement %s -> { class: %d, inst: %d } at index = %d\n",
				mr->name, region.memory_class,
				region.memory_instance, i);
			ret = -EINVAL;
			goto out_dump;
		}

		placements[i] = mr;
		mask |= BIT(mr->id);

		++uregions;
	}

	/* The extension may only be supplied once per ioctl. */
	if (ext_data->n_placements) {
		ret = -EINVAL;
		goto out_dump;
	}

	ext_data->n_placements = args->num_regions;
	for (i = 0; i < args->num_regions; i++)
		ext_data->placements[i] = placements[i];

	ext_data->placement_mask = mask;
	return 0;

out_dump:
	if (1) {
		char buf[256];

		if (ext_data->n_placements) {
			repr_placements(buf, sizeof(buf),
					ext_data->placements,
					ext_data->n_placements);
			drm_dbg(&i915->drm, "Placements were already set in previous EXT. Existing placements: %s\n",
				buf);
		}

		repr_placements(buf, sizeof(buf), placements, i);
		drm_dbg(&i915->drm, "New placements(so far validated): %s\n",
			buf);
	}

	/* NOTE(review): "return ret;" and the closing brace were missing from
	 * this chunk; restored here -- confirm against upstream. */
	return ret;
}
/*
 * NOTE(review): fragment of the create_ext ioctl path; the enclosing
 * function and the declarations of ext_data/obj/args live outside this
 * chunk. Validates the NEEDS_CPU_ACCESS flag against the placement set,
 * then applies an optional user-supplied PAT index.
 */
if (args->flags & I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) { if (ext_data.n_placements == 1) return -EINVAL;
/* * We always need to be able to spill to system memory, if we * can't place in the mappable part of LMEM.
 */ if (!(ext_data.placement_mask & BIT(INTEL_REGION_SMEM))) return -EINVAL;
} else { if (ext_data.n_placements > 1 ||
ext_data.placements[0]->type != INTEL_MEMORY_SYSTEM)
/* No CPU access requested and not pure system memory: GPU-only is OK. */
ext_data.flags |= I915_BO_ALLOC_GPU_ONLY;
}
/* Honour a PAT index supplied via extension, and remember it came from
 * userspace so later code does not override it. */
if (ext_data.pat_index != PAT_INDEX_NOT_SET) {
i915_gem_object_set_pat_index(obj, ext_data.pat_index); /* Mark pat_index is set by UMD */
obj->pat_set_by_user = true;
}
The information on this website has been compiled carefully and to the best
of our knowledge. However, no guarantee is given as to the completeness,
correctness, or quality of the information provided.
Note:
The colored syntax highlighting and the measurement are still experimental.