/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
/* NOTE(review): fragment — the function header (presumably the GEM object
 * free callback, where robj is derived from the dying GEM object) is above
 * this chunk and not visible here. */
if (robj) {
/* Unhook any userptr MMU notifier before the BO goes away. */
radeon_mn_unregister(robj);
/* Drop the TTM reference; TTM destroys the BO on the final put. */
ttm_bo_put(&robj->tbo);
}
}
/*
 * radeon_gem_object_create - allocate a new GEM-backed radeon BO.
 *
 * NOTE(review): truncated in this chunk — the actual BO allocation and the
 * closing brace are not visible, so only the visible argument sanitizing is
 * documented here.
 * NOTE(review): "unsignedlong" below is a fused token (missing space); it
 * should read "unsigned long".
 */
int radeon_gem_object_create(struct radeon_device *rdev, unsignedlong size, int alignment, int initial_domain,
u32 flags, bool kernel, struct drm_gem_object **obj)
{ struct radeon_bo *robj; unsignedlong max_size; int r;
/* Hand back NULL on every failure path. At least align on page size. */
*obj = NULL; if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
/* Maximum bo size is the unpinned gtt size since we use the gtt to
 * handle vram to system pool migrations.
 */
max_size = rdev->mc.gtt_size - rdev->gart_pin_size; if (size > max_size) {
DRM_DEBUG("Allocation size %ldMb bigger than %ldMb limit\n",
size >> 20, max_size >> 20); return -ENOMEM;
}
/* NOTE(review): fragment — the enclosing function header (presumably
 * radeon_gem_set_domain) and the declarations of gobj, wdomain, rdomain,
 * domain, robj and r are above this chunk and not visible here. */
/* FIXME: reimplement */
robj = gem_to_radeon_bo(gobj); /* work out where to validate the buffer to */
/* A requested write domain takes precedence over a read domain. */
domain = wdomain; if (!domain) {
domain = rdomain;
} if (!domain) { /* No domain requested at all: warn and do nothing. */
pr_warn("Set domain without domain !\n"); return 0;
} if (domain == RADEON_GEM_DOMAIN_CPU) { /* Asking for cpu access: wait (up to 30s) for the object to go idle. */
r = dma_resv_wait_timeout(robj->tbo.base.resv,
DMA_RESV_USAGE_BOOKKEEP, true, 30 * HZ); if (!r)
r = -EBUSY; /* zero from the wait means timeout */
if (r < 0 && r != -EINTR) {
pr_err("Failed to wait for object: %li\n", r); return r;
}
} if (domain == RADEON_GEM_DOMAIN_VRAM && robj->prime_shared_count) { /* A BO that is associated with a dma-buf cannot be sensibly migrated to VRAM */ return -EINVAL;
} return 0;
}
/**
 * radeon_gem_init - initialize per-device GEM state
 * @rdev: radeon device pointer
 *
 * Sets up the list used to track all GEM objects created on this
 * device.  Cannot fail.
 *
 * Returns: always 0.
 */
int radeon_gem_init(struct radeon_device *rdev)
{
	INIT_LIST_HEAD(&rdev->gem.objects);

	return 0;
}
/* NOTE(review): fragment — the function header (presumably the GEM close
 * callback that tears down a per-VM mapping) and the declarations of
 * rbo, vm, bo_va and r are above this chunk and not visible here. */
/* Per-process VMs only exist on Cayman+ with acceleration working. */
if ((rdev->family < CHIP_CAYMAN) ||
(!rdev->accel_working)) { return;
}
r = radeon_bo_reserve(rbo, true); if (r) {
dev_err(rdev->dev, "leaking bo va because " "we fail to reserve bo (%d)\n", r); return;
}
/* Drop one reference on the VA mapping; remove it on the last one. */
bo_va = radeon_vm_bo_find(vm, rbo); if (bo_va) { if (--bo_va->ref_count == 0) {
radeon_vm_bo_rmv(rdev, bo_va);
}
}
radeon_bo_unreserve(rbo);
}
/**
 * radeon_gem_handle_lockup - turn a CS deadlock into a GPU reset retry
 * @rdev: radeon device pointer
 * @r: error code returned by the failed operation
 *
 * -EDEADLK signals that the GPU locked up; attempt a reset and, if the
 * reset succeeds, return -EAGAIN so the caller retries the operation.
 * Any other code (including a failed reset) is passed through unchanged.
 *
 * Fixes the fused "staticint" token, which did not compile.
 */
static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
/* NOTE(review): fragment — the start of this function (presumably
 * radeon_gem_userptr_ioctl: flag validation plus the declarations of
 * rdev, args, gobj, bo, handle, r and ctx) is above this chunk and not
 * visible here. */
/* if we want to write to it we must require anonymous
memory and install a MMU notifier */ return -EACCES;
}
down_read(&rdev->exclusive_lock);
/* create a gem object to contain this object in */
r = radeon_gem_object_create(rdev, args->size, 0,
RADEON_GEM_DOMAIN_CPU, 0, false, &gobj); if (r) goto handle_lockup;
bo = gem_to_radeon_bo(gobj);
/* Bind the user address range to the BO's TTM backing store. */
r = radeon_ttm_tt_set_userptr(rdev, bo->tbo.ttm, args->addr, args->flags); if (r) goto release_object;
if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
/* MMU notifier so we notice when userspace unmaps the range. */
r = radeon_mn_register(bo, args->addr); if (r) goto release_object;
}
if (args->flags & RADEON_GEM_USERPTR_VALIDATE) {
/* Populate the pages now; pinning user pages needs mmap_lock. */
mmap_read_lock(current->mm);
r = radeon_bo_reserve(bo, true); if (r) {
mmap_read_unlock(current->mm); goto release_object;
}
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
radeon_bo_unreserve(bo);
mmap_read_unlock(current->mm); if (r) goto release_object;
}
r = drm_gem_handle_create(filp, gobj, &handle); /* drop reference from allocate - handle holds it now */
drm_gem_object_put(gobj); if (r) goto handle_lockup;
/* NOTE(review): the success path (writing the handle back to args and
 * returning 0) and the release_object label targeted by the gotos above
 * appear to be missing from this chunk; as visible, success falls
 * through into the lockup handling below. */
handle_lockup:
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
/**
 * radeon_gem_set_domain_ioctl - handle the GEM set-domain ioctl
 * @dev: DRM device
 * @data: ioctl argument (struct drm_radeon_gem_set_domain)
 * @filp: DRM file the handle belongs to
 *
 * Transition a BO toward the requested read/write domains; today this
 * delegates entirely to radeon_gem_set_domain() (which, per its code,
 * waits for idle when CPU access is asked for).  Runs under the
 * exclusive_lock read side; a -EDEADLK result is converted into a GPU
 * reset and -EAGAIN by radeon_gem_handle_lockup().
 *
 * Returns: 0 on success, -ENOENT for an unknown handle, or a negative
 * error code from the domain transition.
 */
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct drm_radeon_gem_set_domain *args = data;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_gem_object *gobj;
	int ret;

	down_read(&rdev->exclusive_lock);

	/* Resolve the handle; the lookup holds a reference on success. */
	gobj = drm_gem_object_lookup(filp, args->handle);
	if (!gobj) {
		up_read(&rdev->exclusive_lock);
		return -ENOENT;
	}

	ret = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);

	drm_gem_object_put(gobj);
	up_read(&rdev->exclusive_lock);
	return radeon_gem_handle_lockup(rdev, ret);
}
/**
 * radeon_gem_va_update_vm - update the bo_va in its VM
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to update
 *
 * Update the bo_va directly after setting its address. Errors are not
 * vital here, so they are not reported back to userspace.
 *
 * NOTE(review): truncated in this chunk — the error_unlock/error_cleanup
 * labels targeted by the gotos below, the mutex unlock and the drm_exec
 * cleanup are not visible here.
 * NOTE(review): "staticvoid" below is a fused token (missing space).
 */ staticvoid radeon_gem_va_update_vm(struct radeon_device *rdev, struct radeon_bo_va *bo_va)
{ struct radeon_bo_list *vm_bos, *entry; struct list_head list; struct drm_exec exec; unsigned domain; int r;
INIT_LIST_HEAD(&list);
/* Collect every BO mapped in this VM; nothing to do if that fails. */
vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list); if (!vm_bos) return;
drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
/* Lock all VM BOs plus the BO being mapped, retrying on contention. */
drm_exec_until_all_locked(&exec) {
list_for_each_entry(entry, &list, list) {
r = drm_exec_prepare_obj(&exec, &entry->robj->tbo.base,
1);
drm_exec_retry_on_contention(&exec); if (unlikely(r)) goto error_cleanup;
}
r = drm_exec_prepare_obj(&exec, &bo_va->bo->tbo.base, 1);
drm_exec_retry_on_contention(&exec); if (unlikely(r)) goto error_cleanup;
}
list_for_each_entry(entry, &list, list) {
domain = radeon_mem_type_to_domain(entry->robj->tbo.resource->mem_type); /* if anything is swapped out don't swap it in here,
just abort and wait for the next CS */ if (domain == RADEON_GEM_DOMAIN_CPU) goto error_cleanup;
}
mutex_lock(&bo_va->vm->mutex);
/* Flush freed mappings, then write the new mapping's page tables. */
r = radeon_vm_clear_freed(rdev, bo_va->vm); if (r) goto error_unlock;
if (bo_va->it.start && bo_va->bo)
r = radeon_vm_bo_update(rdev, bo_va, bo_va->bo->tbo.resource);
/* NOTE(review): fragment — the header of this function (presumably
 * radeon_gem_va_ioctl, declaring dev, rdev, args and invalid_flags) is
 * not visible in this chunk, nor is the code after the operation switch. */
/* GPU virtual addresses require the VM manager to be up. */
if (!rdev->vm_manager.enabled) {
args->operation = RADEON_VA_RESULT_ERROR; return -ENOTTY;
}
/* !! DONT REMOVE !!
 * We don't support vm_id yet; to be sure we don't have broken userspace,
 * reject anyone trying to use a non-zero value.  Thus moving forward we
 * can use those fields without breaking existing userspace.
 */ if (args->vm_id) {
args->operation = RADEON_VA_RESULT_ERROR; return -EINVAL;
}
/* The bottom of the address space is reserved for the kernel.
 * NOTE(review): "(unsignedlong)" below is a fused token (missing space). */
if (args->offset < RADEON_VA_RESERVED_SIZE) {
dev_err(dev->dev, "offset 0x%lX is in reserved area 0x%X\n",
(unsignedlong)args->offset,
RADEON_VA_RESERVED_SIZE);
args->operation = RADEON_VA_RESULT_ERROR; return -EINVAL;
}
/* don't remove, we need to enforce userspace to set the snooped flag,
 * otherwise we will end up with broken userspace and we won't be able
 * to enable this feature without adding a new interface
 */
/* VALID and SYSTEM are kernel-managed; userspace may not set them. */
invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM; if ((args->flags & invalid_flags)) {
dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n",
args->flags, invalid_flags);
args->operation = RADEON_VA_RESULT_ERROR; return -EINVAL;
}
/* Only MAP and UNMAP are supported operations. */
switch (args->operation) { case RADEON_VA_MAP: case RADEON_VA_UNMAP: break; default:
dev_err(dev->dev, "unsupported operation %d\n",
args->operation);
args->operation = RADEON_VA_RESULT_ERROR; return -EINVAL;
}
/*
 * radeon_align_pitch - compute the aligned scanline pitch for a surface
 * of @width pixels at @cpp bytes per pixel.
 *
 * NOTE(review): truncated in this chunk — the final rounding of
 * "aligned" with "pitch_mask" and the return statement are not visible.
 */
int radeon_align_pitch(struct radeon_device *rdev, int width, int cpp, bool tiled)
{ int aligned = width; int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; int pitch_mask = 0;
/* Alignment depends on bytes-per-pixel; AVIVO-class ASICs and tiled
 * surfaces need the larger alignment. */
switch (cpp) { case 1:
pitch_mask = align_large ? 255 : 127; break; case 2:
pitch_mask = align_large ? 127 : 31; break; case 3: case 4:
pitch_mask = align_large ? 63 : 15; break;
}
/* NOTE(review): fragment — the header of this function (presumably the
 * dumb-buffer create callback, declaring rdev, args, gobj, handle, r and
 * file_priv, and computing args->size) is not visible in this chunk. */
/* Dumb buffers always live in VRAM. */
r = radeon_gem_object_create(rdev, args->size, 0,
RADEON_GEM_DOMAIN_VRAM, 0, false, &gobj); if (r) return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle); /* drop reference from allocate - handle holds it now */
drm_gem_object_put(gobj); if (r) { return r;
}
/* Hand the new handle back to userspace. */
args->handle = handle; return 0;
}
/* NOTE(review): residual web-page boilerplate (a German content
 * disclaimer), not C code — wrapped in a comment so the file stays
 * parseable; it should be deleted outright.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */