/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse
*/ #include <linux/ktime.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/pci.h> #include <linux/dma-buf.h>
list_for_each_entry(file, &ddev->filelist, lhead) { struct drm_gem_object *gobj; int handle;
WARN_ONCE(1, "Still active user space clients!\n");
spin_lock(&file->table_lock);
idr_for_each_entry(&file->object_idr, gobj, handle) {
WARN_ONCE(1, "And also active allocations!\n");
drm_gem_object_put(gobj);
}
idr_destroy(&file->object_idr);
spin_unlock(&file->table_lock);
}
mutex_unlock(&ddev->filelist_mutex);
}
/* * Call from drm_gem_handle_create which appear in both new and open ioctl * case.
*/ staticint amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{ struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); struct amdgpu_fpriv *fpriv = file_priv->driver_priv; struct amdgpu_vm *vm = &fpriv->vm; struct amdgpu_bo_va *bo_va; struct mm_struct *mm; int r;
mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm); if (mm && mm != current->mm) return -EPERM;
if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
!amdgpu_vm_is_bo_always_valid(vm, abo)) return -EPERM;
r = amdgpu_bo_reserve(abo, false); if (r) return r;
/* attach gfx eviction fence */
r = amdgpu_eviction_fence_attach(&fpriv->evf_mgr, abo); if (r) {
DRM_DEBUG_DRIVER("Failed to attach eviction fence to BO\n");
amdgpu_bo_unreserve(abo); return r;
}
amdgpu_bo_unreserve(abo);
/* Validate and add eviction fence to DMABuf imports with dynamic * attachment in compute VMs. Re-validation will be done by * amdgpu_vm_validate. Fences are on the reservation shared with the * export, which is currently required to be validated and fenced * already by amdgpu_amdkfd_gpuvm_restore_process_bos. * * Nested locking below for the case that a GEM object is opened in * kfd_mem_export_dmabuf. Since the lock below is only taken for imports, * but not for export, this is a different lock class that cannot lead to * circular lock dependencies.
*/ if (!vm->is_compute_context || !vm->process_info) return 0; if (!drm_gem_is_imported(obj) ||
!dma_buf_is_dynamic(obj->import_attach->dmabuf)) return 0;
mutex_lock_nested(&vm->process_info->lock, 1); if (!WARN_ON(!vm->process_info->eviction_fence)) {
r = amdgpu_amdkfd_bo_validate_and_fence(abo, AMDGPU_GEM_DOMAIN_GTT,
&vm->process_info->eviction_fence->base); if (r) { struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm);
drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
r = drm_exec_prepare_obj(&exec, &bo->tbo.base, 1);
drm_exec_retry_on_contention(&exec); if (unlikely(r)) goto out_unlock;
r = amdgpu_vm_lock_pd(vm, &exec, 0);
drm_exec_retry_on_contention(&exec); if (unlikely(r)) goto out_unlock;
}
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
amdgpu_eviction_fence_detach(&fpriv->evf_mgr, bo);
bo_va = amdgpu_vm_bo_find(vm, bo); if (!bo_va || --bo_va->ref_count) goto out_unlock;
amdgpu_vm_bo_del(adev, bo_va);
amdgpu_vm_bo_update_shared(bo); if (!amdgpu_vm_ready(vm)) goto out_unlock;
r = amdgpu_vm_clear_freed(adev, vm, &fence); if (unlikely(r < 0))
dev_err(adev->dev, "failed to clear page " "tables on GEM object close (%ld)\n", r); if (r || !fence) goto out_unlock;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) return -EPERM; if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) return -EPERM;
/* Workaround for Thunk bug creating PROT_NONE,MAP_PRIVATE mappings * for debugger access to invisible VRAM. Should have used MAP_SHARED * instead. Clearing VM_MAYWRITE prevents the mapping from ever * becoming writable and makes is_cow_mapping(vm_flags) false.
*/ if (is_cow_mapping(vma->vm_flags) &&
!(vma->vm_flags & VM_ACCESS_FLAGS))
vm_flags_clear(vma, VM_MAYWRITE);
/* create a gem object to contain this object in */ if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) { if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) { /* if gds bo is created from user space, it must be * passed to bo list
*/
DRM_ERROR("GDS bo cannot be per-vm-bo\n"); return -EINVAL;
}
flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
}
if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
r = amdgpu_bo_reserve(vm->root.bo, false); if (r) return r;
resv = vm->root.bo->tbo.base.resv;
}
initial_domain = (u32)(0xffffffff & args->in.domains);
retry:
r = amdgpu_gem_object_create(adev, size, args->in.alignment,
initial_domain,
flags, ttm_bo_type_device, resv, &gobj, fpriv->xcp_id + 1); if (r && r != -ERESTARTSYS) { if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; goto retry;
}
if (offset_in_page(args->addr | args->size)) return -EINVAL;
/* reject unknown flag values */ if (args->flags & ~(AMDGPU_GEM_USERPTR_READONLY |
AMDGPU_GEM_USERPTR_ANONONLY | AMDGPU_GEM_USERPTR_VALIDATE |
AMDGPU_GEM_USERPTR_REGISTER)) return -EINVAL;
if (!(args->flags & AMDGPU_GEM_USERPTR_READONLY) &&
!(args->flags & AMDGPU_GEM_USERPTR_REGISTER)) {
/* if we want to write to it we must install a MMU notifier */ return -EACCES;
}
/* create a gem object to contain this object in */
r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
0, ttm_bo_type_device, NULL, &gobj, fpriv->xcp_id + 1); if (r) return r;
bo = gem_to_amdgpu_bo(gobj);
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags); if (r) goto release_object;
r = amdgpu_hmm_register(bo, args->addr); if (r) goto release_object;
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
&range); if (r) goto release_object;
r = amdgpu_bo_reserve(bo, true); if (r) goto user_pages_done;
amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo); if (r) goto user_pages_done;
}
r = drm_gem_handle_create(filp, gobj, &handle); if (r) goto user_pages_done;
args->handle = handle;
user_pages_done: if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
/**
 * amdgpu_gem_timeout - calculate jiffies timeout from absolute value
 *
 * @timeout_ns: timeout in ns
 *
 * Calculate the timeout in jiffies from an absolute timeout in ns.
 */
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns)
{
	unsigned long jiffies_left;
	ktime_t remaining;

	/* A negative value (interpreted as signed) means wait forever. */
	if ((int64_t)timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	/* Absolute deadline already in the past -> don't wait at all. */
	remaining = ktime_sub(ns_to_ktime(timeout_ns), ktime_get());
	if (ktime_to_ns(remaining) < 0)
		return 0;

	jiffies_left = nsecs_to_jiffies(ktime_to_ns(remaining));

	/* clamp timeout to avoid unsigned-> signed overflow */
	if (jiffies_left > MAX_SCHEDULE_TIMEOUT)
		return MAX_SCHEDULE_TIMEOUT - 1;

	return jiffies_left;
}
/*
 * Wait for all fences on a GEM object's reservation to signal, up to the
 * user-supplied absolute timeout.  Reports idle/busy back through args->out.
 */
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp)
{
	union drm_amdgpu_gem_wait_idle *args = data;
	unsigned long wait_jiffies = amdgpu_gem_timeout(args->in.timeout);
	uint32_t handle = args->in.handle;
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	long wait_ret;
	int r = 0;

	gobj = drm_gem_object_lookup(filp, handle);
	if (!gobj)
		return -ENOENT;

	bo = gem_to_amdgpu_bo(gobj);
	wait_ret = dma_resv_wait_timeout(bo->tbo.base.resv,
					 DMA_RESV_USAGE_READ, true,
					 wait_jiffies);

	if (wait_ret < 0) {
		/* interrupted before timeout */
		r = wait_ret;
	} else {
		/* 0 == timed out (still busy), > 0 == signaled (idle) */
		memset(args, 0, sizeof(*args));
		args->out.status = (wait_ret == 0);
	}

	drm_gem_object_put(gobj);
	return r;
}
int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{ struct drm_amdgpu_gem_metadata *args = data; struct drm_gem_object *gobj; struct amdgpu_bo *robj; int r = -1;
/** * amdgpu_gem_va_update_vm -update the bo_va in its VM * * @adev: amdgpu_device pointer * @vm: vm to update * @bo_va: bo_va to update * @operation: map, unmap or clear * * Update the bo_va directly after setting its address. Errors are not * vital here, so they are not reported back to userspace. * * Returns resulting fence if freed BO(s) got cleared from the PT. * otherwise stub fence in case of error.
*/ staticstruct dma_fence *
amdgpu_gem_va_update_vm(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo_va *bo_va,
uint32_t operation)
{ struct dma_fence *fence = dma_fence_get_stub(); int r;
if (!amdgpu_vm_ready(vm)) return fence;
r = amdgpu_vm_clear_freed(adev, vm, &fence); if (r) goto error;
if (operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) {
r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) goto error;
}
r = amdgpu_vm_update_pdes(adev, vm, false);
error: if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
return fence;
}
/**
 * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags
 *
 * @adev: amdgpu_device pointer
 * @flags: GEM UAPI flags
 *
 * Returns the GEM UAPI flags mapped into hardware for the ASIC.
 */
uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags)
{
	uint64_t pte_flag = 0;

	if (flags & AMDGPU_VM_PAGE_EXECUTABLE)
		pte_flag |= AMDGPU_PTE_EXECUTABLE;
	if (flags & AMDGPU_VM_PAGE_READABLE)
		pte_flag |= AMDGPU_PTE_READABLE;
	if (flags & AMDGPU_VM_PAGE_WRITEABLE)
		pte_flag |= AMDGPU_PTE_WRITEABLE;
	if (flags & AMDGPU_VM_PAGE_PRT)
		pte_flag |= AMDGPU_PTE_PRT_FLAG(adev);
	if (flags & AMDGPU_VM_PAGE_NOALLOC)
		pte_flag |= AMDGPU_PTE_NOALLOC;

	/* MTYPE translation is ASIC-specific; only present on some GMCs. */
	if (adev->gmc.gmc_funcs->map_mtype)
		pte_flag |= amdgpu_gmc_map_mtype(adev,
						 flags & AMDGPU_VM_MTYPE_MASK);

	return pte_flag;
}
if (args->va_address < AMDGPU_VA_RESERVED_BOTTOM) {
dev_dbg(dev->dev, "va_address 0x%llx is in reserved area 0x%llx\n",
args->va_address, AMDGPU_VA_RESERVED_BOTTOM); return -EINVAL;
}
if (args->va_address >= AMDGPU_GMC_HOLE_START &&
args->va_address < AMDGPU_GMC_HOLE_END) {
dev_dbg(dev->dev, "va_address 0x%llx is in VA hole 0x%llx-0x%llx\n",
args->va_address, AMDGPU_GMC_HOLE_START,
AMDGPU_GMC_HOLE_END); return -EINVAL;
}
args->va_address &= AMDGPU_GMC_HOLE_MASK;
vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
vm_size -= AMDGPU_VA_RESERVED_TOP; if (args->va_address + args->map_size > vm_size) {
dev_dbg(dev->dev, "va_address 0x%llx is in top reserved area 0x%llx\n",
args->va_address + args->map_size, vm_size); return -EINVAL;
}
/* * The buffer returned from this function should be cleared, but * it can only be done if the ring is enabled or we'll fail to * create the buffer.
*/ if (adev->mman.buffer_funcs_enabled)
flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */
drm_gem_object_put(gobj); if (r) return r;
/* * Although we have a valid reference on file->pid, that does * not guarantee that the task_struct who called get_pid() is * still alive (e.g. get_pid(current) => fork() => exit()). * Therefore, we need to protect this ->comm access using RCU.
*/
rcu_read_lock();
pid = rcu_dereference(file->pid);
task = pid_task(pid, PIDTYPE_TGID);
seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
task ? task->comm : "");
rcu_read_unlock();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.