/* NOTE(review): fragment — the enclosing function's header is not visible in
 * this chunk.  It appears to be a per-context close path that receives both a
 * context 'ctx' and a GEM object 'obj'.
 *
 * If VM isn't created yet, nothing to cleanup.  And in fact calling
 * put_iova_spaces() with vm=NULL would be bad, in that it will tear-
 * down the mappings of shared buffers in other contexts.
 */
if (!ctx->vm)
	return;

/*
 * VM_BIND does not depend on implicit teardown of VMAs on handle
 * close, but instead on implicit teardown of the VM when the device
 * is closed (see msm_gem_vm_close())
 */
if (msm_context_is_vmbind(ctx))
	return;

/*
 * TODO we might need to kick this to a queue to avoid blocking
 * in CLOSE ioctl
 */
dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_BOOKKEEP, false,
		      MAX_SCHEDULE_TIMEOUT);
/* NOTE(review): fragment — part of a page-allocation path ('msm_obj' is
 * declared before this span).
 *
 * Cache sync.. this is a bit over-complicated, to fit dma-mapping
 * API. Really GPU cache is out of scope here (handled on cmdstream)
 * and all we need to do is invalidate newly allocated pages before
 * mapping to CPU as uncached/writecombine.
 *
 * On top of this, we have the added headache, that depending on
 * display generation, the display's iommu may be wired up to either
 * the toplevel drm device (mdss), or to the mdp sub-node, meaning
 * that here we either have dma-direct or iommu ops.
 *
 * Let this be a cautionary tail of abstraction gone wrong.
 */

/* For non-cached buffers, ensure the new pages are clean
 * because display controller, GPU, etc. are not coherent:
 */
if (msm_obj->flags & MSM_BO_WC)
	sync_for_device(msm_obj);
/* NOTE(review): fragment — the two 'if' braces opened below are never closed
 * within this span; their closing braces lie outside the visible chunk.
 *
 * Skip gpuvm in the object free path to avoid a WARN_ON() splat.
 * See explaination in msm_gem_assert_locked()
 */
if (kref_read(&obj->refcount))
	drm_gpuvm_bo_gem_evict(obj, true);

if (msm_obj->pages) {
	if (msm_obj->sgt) {
		/* For non-cached buffers, ensure the new
		 * pages are clean because display controller,
		 * GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & MSM_BO_WC)
			sync_for_cpu(msm_obj);
/* NOTE(review): fragment of a page-fault handler (returns VM_FAULT_* codes);
 * the enclosing function header and the 'out'/'out_unlock' goto targets are
 * outside the visible chunk.
 *
 * vm_ops.open/drm_gem_mmap_obj and close get and put
 * a reference on obj. So, we dont need to hold one here.
 */
err = msm_gem_lock_interruptible(obj);
if (err) {
	/* Interrupted while waiting for the lock: retry the fault. */
	ret = VM_FAULT_NOPAGE;
	goto out;
}

/* Faulting on a purged/don't-need object is a userspace bug: */
if (GEM_WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
	msm_gem_unlock(obj);
	return VM_FAULT_SIGBUS;
}

/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
	ret = vmf_error(PTR_ERR(pages));
	goto out_unlock;
}

/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
/* NOTE(review): tail of lookup_vma() — the function header is not visible in
 * this chunk, and two of the closing braces below close scopes opened before
 * this span.  Walks the VM_BOs attached to the object looking for a VMA that
 * belongs to the requested vm.
 */
drm_gpuvm_bo_for_each_va (vma, vm_bo) {
	if (vma->vm == vm) {
		/* lookup_vma() should only be used in paths
		 * with at most one vma per vm
		 */
		GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva));

		return vma;
	}
}
}

/* No VMA for this vm: */
return NULL;
}
/*
 * If close is true, this also closes the VMA (releasing the allocated
 * iova range) in addition to removing the iommu mapping. In the eviction
 * case (!close), we keep the iova allocated, but only remove the iommu
 * mapping.
 */
/* NOTE(review): "staticvoid" and "constchar" are each missing a space
 * ("static void", "const char") — this will not compile as-is; looks like an
 * extraction/copy artifact.  The function body is also truncated below.
 */
staticvoid
put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close,
		constchar *reason)
{
	struct drm_gpuvm_bo *vm_bo, *tmp;
/* Special unpin path for use in fence-signaling path, avoiding the need
 * to hold the obj lock by only depending on things that a protected by
 * the LRU lock. In particular we know that that we already have backing
 * and and that the object's dma_resv has the fence for the current
 * submit/job which will prevent us racing against page eviction.
 */
void msm_gem_unpin_active(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* NOTE(review): the code below appears spliced in from a different
	 * function (an iova get-and-pin helper): a void function cannot
	 * return -EINVAL, and 'vma', 'vm', 'range_start', 'range_end',
	 * 'iova' and 'ret' are never declared here.  Confirm against the
	 * original source before relying on this.
	 */
	if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE)
		return -EINVAL;

	vma = get_vma_locked(obj, vm, range_start, range_end);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	ret = msm_gem_pin_vma_locked(obj, vma);
	if (!ret) {
		*iova = vma->va.addr;
		pin_obj_locked(obj);
	}

	return ret;
}
/*
 * Get the object's iova in @vm and pin it.  Should have a matching put.
 * The iova allocation is limited to the range [range_start, range_end)
 * (in pages, per the locked helper's contract).
 */
int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj,
				   struct drm_gpuvm *vm, uint64_t *iova,
				   u64 range_start, u64 range_end)
{
	struct drm_exec exec;
	int err;

	/* Take the VM resv and the object resv together: */
	msm_gem_lock_vm_and_obj(&exec, obj, vm);
	err = get_and_pin_iova_range_locked(obj, vm, iova,
					    range_start, range_end);
	drm_exec_fini(&exec);	/* drop locks */

	return err;
}
/*
 * Get the object's iova in @vm and pin it.  Should have a matching put.
 * Convenience wrapper that places no restriction on the iova range.
 */
int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
			     uint64_t *iova)
{
	/* Full iova space: no range limit. */
	int err = msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX);

	return err;
}
/*
 * Get an iova but don't pin it. Doesn't need a put because iovas are currently
 * valid for the life of the object.
 *
 * Returns 0 on success with *iova filled in, or a negative errno.
 */
int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
		     uint64_t *iova)
{
	struct drm_gpuva *vma;
	struct drm_exec exec;
	int ret = 0;

	msm_gem_lock_vm_and_obj(&exec, obj, vm);
	vma = get_vma_locked(obj, vm, 0, U64_MAX);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
	} else {
		*iova = vma->va.addr;
	}
	drm_exec_fini(&exec);	/* drop locks */

	/* BUGFIX(review): the original text fell off the end of this non-void
	 * function — the 'return ret;' and closing brace were missing.
	 */
	return ret;
}
/*
 * Get the requested iova but don't pin it. Fails if the requested iova is
 * not available. Doesn't need a put because iovas are currently valid for
 * the life of the object.
 *
 * Setting an iova of zero will clear the vma.
 */
int msm_gem_set_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm,
		     uint64_t iova)
{
	struct drm_exec exec;
	int ret = 0;

	msm_gem_lock_vm_and_obj(&exec, obj, vm);
	if (!iova) {
		/* iova==0 means "release any existing mapping": */
		ret = clear_iova(obj, vm);
	} else {
		struct drm_gpuva *vma;

		vma = get_vma_locked(obj, vm, iova, iova + obj->size);
		if (IS_ERR(vma)) {
			ret = PTR_ERR(vma);
		} else if (GEM_WARN_ON(vma->va.addr != iova)) {
			/* An existing VMA at a different address: undo and
			 * report the address as busy.  (BUGFIX(review): the
			 * original had the typo "elseif".)
			 */
			clear_iova(obj, vm);
			ret = -EBUSY;
		}
	}
	drm_exec_fini(&exec);	/* drop locks */

	/* BUGFIX(review): the original text fell off the end of this non-void
	 * function — the 'return ret;' and closing brace were missing.
	 */
	return ret;
}
/*
 * Unpin a iova by updating the reference counts.  The memory isn't actually
 * purged until something else (shrinker, mm_notifier, destroy, etc) decides
 * to get rid of it.
 */
void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm)
{
	struct drm_exec exec;
	struct drm_gpuva *vma;

	msm_gem_lock_vm_and_obj(&exec, obj, vm);

	/* Only drop the pin count if a VMA actually exists in this vm: */
	vma = lookup_vma(obj, vm);
	if (vma)
		msm_gem_unpin_locked(obj);

	/* Non-KMS VMs also release the iova space itself: */
	if (!is_kms_vm(vm))
		put_iova_spaces(obj, vm, true, "close");

	drm_exec_fini(&exec);	/* drop locks */
}
/*
 * Resolve a dumb-buffer handle to its fake mmap offset.  Returns 0 on
 * success with *offset filled in, or -ENOENT for an unknown handle.
 */
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    uint32_t handle, uint64_t *offset)
{
	struct drm_gem_object *obj;

	/* GEM does all our handle to object mapping */
	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	*offset = msm_gem_mmap_offset(obj);

	/* Drop the lookup reference: */
	drm_gem_object_put(obj);

	return 0;
}
/*
 * Map the object's backing pages into the kernel and return the vaddr.
 * Caller must hold the object lock.  @madv gates which madvise states are
 * acceptable (e.g. __MSM_MADV_PURGED for the hang-dump path).
 *
 * Returns the kernel vaddr on success or an ERR_PTR().
 *
 * BUGFIX(review): the original text had "staticvoid" (missing space), a
 * 'goto fail' with no 'fail:' label, and its tail was replaced by lines from
 * a different (int-returning) function, so this void*-returning function
 * never returned the vaddr.  Reconstructed a coherent success/failure tail —
 * confirm against the upstream source.
 */
static void *get_vaddr(struct drm_gem_object *obj, unsigned madv)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	struct page **pages;
	int ret = 0;

	msm_gem_assert_locked(obj);

	/* Imported (dma-buf) objects must be vmapped via the exporter: */
	if (drm_gem_is_imported(obj))
		return ERR_PTR(-ENODEV);

	pages = msm_gem_get_pages_locked(obj, madv);
	if (IS_ERR(pages))
		return ERR_CAST(pages);

	pin_obj_locked(obj);

	/*
	 * increment vmap_count *before* vmap() call, so shrinker can
	 * check vmap_count (is_vunmapable()) outside of msm_obj lock.
	 * This guarantees that we won't try to msm_gem_vunmap() this
	 * same object from within the vmap() call (while we already
	 * hold msm_obj lock)
	 */
	msm_obj->vmap_count++;

	if (!msm_obj->vaddr) {
		msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
				VM_MAP, msm_gem_pgprot(msm_obj, PAGE_KERNEL));
		if (msm_obj->vaddr == NULL) {
			ret = -ENOMEM;
			goto fail;
		}
	}

	return msm_obj->vaddr;

fail:
	/* Undo the count/pin taken above before reporting failure: */
	msm_obj->vmap_count--;
	msm_gem_unpin_locked(obj);
	return ERR_PTR(ret);
}
/*
 * Don't use this!  It is for the very special case of dumping submits from
 * GPU hangs or faults, where the bo may already be MSM_MADV_DONTNEED, but we
 * know the buffer is still on the active list.
 */
void *msm_gem_get_vaddr_active(struct drm_gem_object *obj)
{
	/* Allow even purged madv state, since the pages are still live: */
	void *vaddr = get_vaddr(obj, __MSM_MADV_PURGED);

	return vaddr;
}
	/* NOTE(review): fragment — the enclosing function (a purge path that
	 * releases the object's backing store, presumably under OOM/shrinker
	 * pressure) begins before this span; 'priv' is declared earlier.
	 */
	mutex_lock(&priv->lru.lock);
	/* A one-way transition: */
	msm_obj->madv = __MSM_MADV_PURGED;
	mutex_unlock(&priv->lru.lock);

	/* Invalidate any userspace mmap offset for the freed backing: */
	drm_gem_free_mmap_offset(obj);

	/*
	 * Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
/*
 * Unpin the backing pages and make them available to be swapped out.
 */
void msm_gem_evict(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	/* NOTE(review): the body below appears spliced in from the object
	 * free path — 'exec' is used but never declared here, and the
	 * "free" reason string suggests msm_gem_free_object().  Confirm
	 * against the original source.
	 *
	 * We need to lock any VMs the object is still attached to, but not
	 * the object itself (see explaination in msm_gem_assert_locked()),
	 * so just open-code this special case.
	 *
	 * Note that we skip the dance if we aren't attached to any VM. This
	 * is load bearing. The driver needs to support two usage models:
	 *
	 * 1. Legacy kernel managed VM: Userspace expects the VMA's to be
	 * implicitly torn down when the object is freed, the VMA's do
	 * not hold a hard reference to the BO.
	 *
	 * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the
	 * BO. This can be dropped when the VM is closed and it's associated
	 * VMAs are torn down. (See msm_gem_vm_close()).
	 *
	 * In the latter case the last reference to a BO can be dropped while
	 * we already have the VM locked. It would have already been removed
	 * from the gpuva list, but lockdep doesn't know that. Or understand
	 * the differences between the two usage models.
	 */
	if (!list_empty(&obj->gpuva.list)) {
		drm_exec_init(&exec, 0, 0);
		drm_exec_until_all_locked (&exec) {
			struct drm_gpuvm_bo *vm_bo;

			/* Lock each attached VM's resv object, retrying the
			 * whole batch on contention:
			 */
			drm_gem_for_each_gpuvm_bo (vm_bo, obj) {
				drm_exec_lock_obj(&exec,
						  drm_gpuvm_resv_obj(vm_bo->vm));
				drm_exec_retry_on_contention(&exec);
			}
		}

		put_iova_spaces(obj, NULL, true, "free");

		drm_exec_fini(&exec);	/* drop locks */
	}
	/* NOTE(review): fragment of the object free path — the brace opened
	 * by the 'if' below is never closed within this span.
	 */
	if (drm_gem_is_imported(obj)) {
		/* Imported objects are never kernel-vmapped by us: */
		GEM_WARN_ON(msm_obj->vaddr);

		/* Don't drop the pages for imported dmabuf, as they are not
		 * ours, just free the array we allocated:
		 */
		kvfree(msm_obj->pages);

	/*
	 * In error paths, we could end up here before msm_gem_new_handle()
	 * has changed obj->resv to point to the shared resv. In this case,
	 * we don't want to drop a ref to the shared r_obj that we haven't
	 * taken yet.
	 */
	if ((msm_obj->flags & MSM_BO_NO_SHARE) && (obj->resv != &obj->_resv)) {
		struct drm_gem_object *r_obj =
			container_of(obj->resv, struct drm_gem_object, _resv);

		/* Drop reference we hold to shared resv obj: */
		drm_gem_object_put(r_obj);
	}
	/* NOTE(review): fragment of an object-allocation path (an
	 * msm_gem_new()-like function); the function header and the
	 * declarations of 'size', 'flags', 'ret', 'obj', 'msm_obj' and the
	 * 'fail' label are outside the visible chunk.
	 *
	 * Disallow zero sized objects as they make the underlying
	 * infrastructure grumpy
	 */
	if (size == 0)
		return ERR_PTR(-EINVAL);

	ret = msm_gem_new_impl(dev, flags, &obj);
	if (ret)
		return ERR_PTR(ret);

	msm_obj = to_msm_bo(obj);

	ret = drm_gem_object_init(dev, obj, size);
	if (ret)
		goto fail;
	/*
	 * Our buffers are kept pinned, so allocating them from the
	 * MOVABLE zone is a really bad idea, and conflicts with CMA.
	 * See comments above new_inode() why this is required _and_
	 * expected if you're going to pin these pages.
	 */
	mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER);
/*
 * NOTE(review): extraneous non-code text (a German website disclaimer) was
 * appended here, most likely from a copy/paste or web-scraping accident; it
 * is not valid C and should be removed entirely.  Original text, translated:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.  Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */