/** * DOC: VC4 GEM BO management support * * The VC4 GPU architecture (both scanout and rendering) has direct * access to system memory with no MMU in between. To support it, we * use the GEM DMA helper functions to allocate contiguous ranges of * physical memory for our BOs. * * Since the DMA allocator is very slow, we keep a cache of recently * freed BOs around so that the kernel's allocation of objects for 3D * rendering can return quickly.
*/
/* Takes ownership of *name and returns the appropriate slot for it in * the bo_labels[] array, extending it as necessary. * * This is inefficient and could use a hash table instead of walking * an array and strcmp()ing. However, the assumption is that user * labeling will be infrequent (scanout buffers and other long-lived * objects, or debug driver builds), so we can live with it for now.
*/ staticint vc4_get_user_label(struct vc4_dev *vc4, constchar *name)
{ int i; int free_slot = -1;
for (i = 0; i < vc4->num_labels; i++) { if (!vc4->bo_labels[i].name) {
free_slot = i;
} elseif (strcmp(vc4->bo_labels[i].name, name) == 0) {
kfree(name); return i;
}
}
if (vc4->bo_labels[bo->label].num_allocated == 0 &&
is_user_label(bo->label)) { /* Free user BO label slots on last unreference. * Slots are just where we track the stats for a given * name, and once a name is unused we can reuse that * slot.
*/
kfree(vc4->bo_labels[bo->label].name);
vc4->bo_labels[bo->label].name = NULL;
}
new_list = kmalloc_array(new_size, sizeof(struct list_head),
GFP_KERNEL); if (!new_list) return NULL;
/* Rebase the old cached BO lists to their new list * head locations.
*/ for (i = 0; i < vc4->bo_cache.size_list_size; i++) { struct list_head *old_list =
&vc4->bo_cache.size_list[i];
if (list_empty(old_list))
INIT_LIST_HEAD(&new_list[i]); else
list_replace(old_list, &new_list[i]);
} /* And initialize the brand new BO list heads. */ for (i = vc4->bo_cache.size_list_size; i < new_size; i++)
INIT_LIST_HEAD(&new_list[i]);
/* list_del_init() is used here because the caller might release * the purgeable lock in order to acquire the madv one and update the * madv status. * During this short period of time a user might decide to mark * the BO as unpurgeable, and if bo->madv is set to * VC4_MADV_DONTNEED it will try to remove the BO from the * purgeable list which will fail if the ->next/prev fields * are set to LIST_POISON1/LIST_POISON2 (which is what * list_del() does). * Re-initializing the list element guarantees that list_del() * will work correctly even if it's a NOP.
*/
list_del_init(&bo->size_head);
vc4->purgeable.num--;
vc4->purgeable.size -= bo->base.base.size;
}
/* Release the purgeable lock while we're purging the BO so * that other people can continue inserting things in the * purgeable pool without having to wait for all BOs to be * purged.
*/
mutex_unlock(&vc4->purgeable.lock);
mutex_lock(&bo->madv_lock);
/* Since we released the purgeable pool lock before acquiring * the BO madv one, the user may have marked the BO as WILLNEED * and re-used it in the meantime. * Before purging the BO we need to make sure * - it is still marked as DONTNEED * - it has not been re-inserted in the purgeable list * - it is not used by HW blocks * If one of these conditions is not met, just skip the entry.
*/ if (bo->madv == VC4_MADV_DONTNEED &&
list_empty(&bo->size_head) &&
!refcount_read(&bo->usecnt)) {
purged_size = bo->base.base.size;
vc4_bo_purge(obj);
}
mutex_unlock(&bo->madv_lock);
mutex_lock(&vc4->purgeable.lock);
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return ERR_PTR(-ENODEV);
if (size == 0) return ERR_PTR(-EINVAL);
/* First, try to get a vc4_bo from the kernel BO cache. */
bo = vc4_bo_get_from_cache(dev, size, type); if (bo) { if (!allow_unzeroed)
memset(bo->base.vaddr, 0, bo->base.base.size); return bo;
}
dma_obj = drm_gem_dma_create(dev, size); if (IS_ERR(dma_obj)) { /* * If we've run out of DMA memory, kill the cache of * DMA allocations we've got laying around and try again.
*/
vc4_bo_cache_purge(dev);
dma_obj = drm_gem_dma_create(dev, size);
}
if (IS_ERR(dma_obj)) { /* * Still not enough DMA memory, purge the userspace BO * cache and retry. * This is sub-optimal since we purge the whole userspace * BO cache which forces user that want to re-use the BO to * restore its initial content. * Ideally, we should purge entries one by one and retry * after each to see if DMA allocation succeeds. Or even * better, try to find an entry with at least the same * size.
*/
vc4_bo_userspace_cache_purge(dev);
dma_obj = drm_gem_dma_create(dev, size);
}
if (IS_ERR(dma_obj)) { struct drm_printer p = drm_info_printer(vc4->base.dev);
drm_err(dev, "Failed to allocate from GEM DMA helper:\n");
vc4_bo_stats_print(&p, vc4); return ERR_PTR(-ENOMEM);
}
bo = to_vc4_bo(&dma_obj->base);
/* By default, BOs do not support the MADV ioctl. This will be enabled * only on BOs that are exposed to userspace (V3D, V3D_SHADER and DUMB * BOs).
*/
bo->madv = __VC4_MADV_NOTSUPP;
/* Called on the last userspace/kernel unreference of the BO. Returns * it to the BO cache if possible, otherwise frees it.
*/ staticvoid vc4_free_object(struct drm_gem_object *gem_bo)
{ struct drm_device *dev = gem_bo->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo = to_vc4_bo(gem_bo); struct list_head *cache_list;
/* Remove the BO from the purgeable list. */
mutex_lock(&bo->madv_lock); if (bo->madv == VC4_MADV_DONTNEED && !refcount_read(&bo->usecnt))
vc4_bo_remove_from_purgeable_pool(bo);
mutex_unlock(&bo->madv_lock);
mutex_lock(&vc4->bo_lock); /* If the object references someone else's memory, we can't cache it.
*/ if (gem_bo->import_attach) {
vc4_bo_destroy(bo); goto out;
}
/* Don't cache if it was publicly named. */ if (gem_bo->name) {
vc4_bo_destroy(bo); goto out;
}
/* If this object was partially constructed but DMA allocation * had failed, just free it. Can also happen when the BO has been * purged.
*/ if (!bo->base.vaddr) {
vc4_bo_destroy(bo); goto out;
}
int vc4_bo_inc_usecnt(struct vc4_bo *bo)
{ struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); int ret;
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV;
/* Fast path: if the BO is already retained by someone, no need to * check the madv status.
*/ if (refcount_inc_not_zero(&bo->usecnt)) return 0;
mutex_lock(&bo->madv_lock); switch (bo->madv) { case VC4_MADV_WILLNEED: if (!refcount_inc_not_zero(&bo->usecnt))
refcount_set(&bo->usecnt, 1);
ret = 0; break; case VC4_MADV_DONTNEED: /* We shouldn't use a BO marked as purgeable if at least * someone else retained its content by incrementing usecnt. * Luckily the BO hasn't been purged yet, but something wrong * is happening here. Just throw an error instead of * authorizing this use case.
*/ case __VC4_MADV_PURGED: /* We can't use a purged BO. */ default: /* Invalid madv value. */
ret = -EINVAL; break;
}
mutex_unlock(&bo->madv_lock);
staticstruct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags)
{ struct vc4_bo *bo = to_vc4_bo(obj); struct dma_buf *dmabuf; int ret;
if (bo->validated_shader) {
DRM_DEBUG("Attempting to export shader BO\n"); return ERR_PTR(-EINVAL);
}
/* Note: as soon as the BO is exported it becomes unpurgeable, because * noone ever decrements the usecnt even if the reference held by the * exported BO is released. This shouldn't be a problem since we don't * expect exported BOs to be marked as purgeable.
*/
ret = vc4_bo_inc_usecnt(bo); if (ret) {
drm_err(obj->dev, "Failed to increment BO usecnt\n"); return ERR_PTR(ret);
}
dmabuf = drm_gem_prime_export(obj, flags); if (IS_ERR(dmabuf))
vc4_bo_dec_usecnt(bo);
/* The only reason we would end up here is when user-space accesses * BO's memory after it's been purged.
*/
mutex_lock(&bo->madv_lock);
WARN_ON(bo->madv != __VC4_MADV_PURGED);
mutex_unlock(&bo->madv_lock);
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV;
ret = vc4_grab_bin_bo(vc4, vc4file); if (ret) return ret;
/* * We can't allocate from the BO cache, because the BOs don't * get zeroed, and that might leak data between users.
*/
bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_V3D); if (IS_ERR(bo)) return PTR_ERR(bo);
bo->madv = VC4_MADV_WILLNEED;
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
drm_gem_object_put(&bo->base.base);
ret = vc4_grab_bin_bo(vc4, vc4file); if (ret) return ret;
bo = vc4_bo_create(dev, args->size, true, VC4_BO_TYPE_V3D_SHADER); if (IS_ERR(bo)) return PTR_ERR(bo);
bo->madv = VC4_MADV_WILLNEED;
if (copy_from_user(bo->base.vaddr,
(void __user *)(uintptr_t)args->data,
args->size)) {
ret = -EFAULT; goto fail;
} /* Clear the rest of the memory from allocating from the BO * cache.
*/
memset(bo->base.vaddr + args->size, 0,
bo->base.base.size - args->size);
bo->validated_shader = vc4_validate_shader(&bo->base); if (!bo->validated_shader) {
ret = -EINVAL; goto fail;
}
/* We have to create the handle after validation, to avoid * races for users to do doing things like mmap the shader BO.
*/
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
fail:
drm_gem_object_put(&bo->base.base);
return ret;
}
/** * vc4_set_tiling_ioctl() - Sets the tiling modifier for a BO. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * The tiling state of the BO decides the default modifier of an fb if * no specific modifier was set by userspace, and the return value of * vc4_get_tiling_ioctl() (so that userspace can treat a BO it * received from dmabuf as the same tiling format as the producer * used).
*/ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{ struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_set_tiling *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; bool t_format;
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV;
if (args->flags != 0) return -EINVAL;
switch (args->modifier) { case DRM_FORMAT_MOD_NONE:
t_format = false; break; case DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED:
t_format = true; break; default: return -EINVAL;
}
gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -ENOENT;
}
bo = to_vc4_bo(gem_obj);
bo->t_format = t_format;
drm_gem_object_put(gem_obj);
return 0;
}
/** * vc4_get_tiling_ioctl() - Gets the tiling modifier for a BO. * @dev: DRM device * @data: ioctl argument * @file_priv: DRM file for this fd * * Returns the tiling modifier for a BO as set by vc4_set_tiling_ioctl().
*/ int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{ struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_get_tiling *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo;
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV;
if (args->flags != 0 || args->modifier != 0) return -EINVAL;
gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); return -ENOENT;
}
bo = to_vc4_bo(gem_obj);
if (bo->t_format)
args->modifier = DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED; else
args->modifier = DRM_FORMAT_MOD_NONE;
staticvoid vc4_bo_cache_destroy(struct drm_device *dev, void *unused); int vc4_bo_cache_init(struct drm_device *dev)
{ struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; int i;
if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4)) return -ENODEV;
/* Create the initial set of BO labels that the kernel will * use. This lets us avoid a bunch of string reallocation in * the kernel's draw and BO allocation paths.
*/
vc4->bo_labels = kcalloc(VC4_BO_TYPE_COUNT, sizeof(*vc4->bo_labels),
GFP_KERNEL); if (!vc4->bo_labels) return -ENOMEM;
vc4->num_labels = VC4_BO_TYPE_COUNT;
BUILD_BUG_ON(ARRAY_SIZE(bo_type_names) != VC4_BO_TYPE_COUNT); for (i = 0; i < VC4_BO_TYPE_COUNT; i++)
vc4->bo_labels[i].name = bo_type_names[i];
ret = drmm_mutex_init(dev, &vc4->bo_lock); if (ret) {
kfree(vc4->bo_labels); return ret;
}
for (i = 0; i < vc4->num_labels; i++) { if (vc4->bo_labels[i].num_allocated) {
drm_err(dev, "Destroying BO cache with %d %s " "BOs still allocated\n",
vc4->bo_labels[i].num_allocated,
vc4->bo_labels[i].name);
}
if (is_user_label(i))
kfree(vc4->bo_labels[i].name);
}
kfree(vc4->bo_labels);
}
/* Attaches a user-supplied label to a BO so that its allocations show
 * up under that name in the BO statistics.
 *
 * Note: vc4_get_user_label() takes ownership of @name (per its own
 * contract above), so after that call @name must not be freed here,
 * even on failure.
 */
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_vc4_label_bo *args = data;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_gem_object *obj;
	int slot, ret = 0;
	char *name;

	/* This ioctl only exists on the pre-V3D (VC4) generations. */
	if (WARN_ON_ONCE(vc4->gen > VC4_GEN_4))
		return -ENODEV;

	/* An empty label is rejected. */
	if (!args->len)
		return -EINVAL;

	name = strndup_user(u64_to_user_ptr(args->name), args->len + 1);
	if (IS_ERR(name))
		return PTR_ERR(name);

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		drm_err(dev, "Failed to look up GEM BO %d\n", args->handle);
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&vc4->bo_lock);
	slot = vc4_get_user_label(vc4, name);
	if (slot == -1)
		ret = -ENOMEM;
	else
		vc4_bo_set_label(obj, slot);
	mutex_unlock(&vc4->bo_lock);

	drm_gem_object_put(obj);

	return ret;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.33 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.