/* * Copyright 2009 Jerome Glisse. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. *
*/ /* * Authors: * Jerome Glisse <glisse@freedesktop.org> * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> * Dave Airlie
*/
switch (bo->resource->mem_type) { case AMDGPU_PL_GDS: case AMDGPU_PL_GWS: case AMDGPU_PL_OA: case AMDGPU_PL_DOORBELL:
placement->num_placement = 0; return;
case TTM_PL_VRAM: if (!adev->mman.buffer_funcs_enabled) { /* Move to system memory */
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
/* Try evicting to the CPU inaccessible part of VRAM * first, but only set GTT as busy placement, so this * BO will be evicted to GTT rather than causing other * BOs to be evicted from VRAM
*/
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
abo->placements[0].flags |= TTM_PL_FLAG_DESIRED;
} else { /* Move to GTT memory */
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT |
AMDGPU_GEM_DOMAIN_CPU);
} break; case TTM_PL_TT: case AMDGPU_PL_PREEMPT: default:
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); break;
}
*placement = abo->placement;
}
/** * amdgpu_ttm_map_buffer - Map memory into the GART windows * @bo: buffer object to map * @mem: memory object to map * @mm_cur: range to map * @window: which GART window to use * @ring: DMA ring to use for the copy * @tmz: if we should setup a TMZ enabled mapping * @size: in number of bytes to map, out number of bytes mapped * @addr: resulting address inside the MC address space * * Setup one of the GART windows to access a specific piece of memory or return * the physical address for local memory.
*/ staticint amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct amdgpu_res_cursor *mm_cur, unsignedint window, struct amdgpu_ring *ring, bool tmz, uint64_t *size, uint64_t *addr)
{ struct amdgpu_device *adev = ring->adev; unsignedint offset, num_pages, num_dw, num_bytes;
uint64_t src_addr, dst_addr; struct amdgpu_job *job; void *cpu_addr;
uint64_t flags; unsignedint i; int r;
/** * amdgpu_ttm_copy_mem_to_mem - Helper function for copy * @adev: amdgpu device * @src: buffer/address where to read from * @dst: buffer/address where to write to * @size: number of bytes to copy * @tmz: if a secure copy should be used * @resv: resv object to sync to * @f: Returns the last fence if multiple jobs are submitted. * * The function copies @size bytes from {src->mem + src->offset} to * {dst->mem + dst->offset}. src->bo and dst->bo could be same BO for a * move and different for a BO to BO copy. *
*/ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, conststruct amdgpu_copy_mem *src, conststruct amdgpu_copy_mem *dst,
uint64_t size, bool tmz, struct dma_resv *resv, struct dma_fence **f)
{ struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_res_cursor src_mm, dst_mm; struct dma_fence *fence = NULL; int r = 0;
uint32_t copy_flags = 0; struct amdgpu_bo *abo_src, *abo_dst;
if (!adev->mman.buffer_funcs_enabled) {
dev_err(adev->dev, "Trying to move memory with ring turned off.\n"); return -EINVAL;
}
/* * amdgpu_move_blit - Copy an entire buffer to another buffer * * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to * help move buffers to and from VRAM.
*/ staticint amdgpu_move_blit(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *new_mem, struct ttm_resource *old_mem)
{ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_copy_mem src, dst; struct dma_fence *fence = NULL; int r;
/* Always block for VM page tables before committing the new location */ if (bo->type == ttm_bo_type_kernel)
r = ttm_bo_move_accel_cleanup(bo, fence, true, false, new_mem); else
r = ttm_bo_move_accel_cleanup(bo, fence, evict, true, new_mem);
dma_fence_put(fence); return r;
error: if (fence)
dma_fence_wait(fence, false);
dma_fence_put(fence); return r;
}
/** * amdgpu_res_cpu_visible - Check that resource can be accessed by CPU * @adev: amdgpu device * @res: the resource to check * * Returns: true if the full resource is CPU visible, false otherwise.
*/ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, struct ttm_resource *res)
{ struct amdgpu_res_cursor cursor;
amdgpu_res_first(res, 0, res->size, &cursor); while (cursor.remaining) { if ((cursor.start + cursor.size) > adev->gmc.visible_vram_size) returnfalse;
amdgpu_res_next(&cursor, cursor.size);
}
returntrue;
}
/* * amdgpu_res_copyable - Check that memory can be accessed by ttm_bo_move_memcpy * * Called by amdgpu_bo_move()
*/ staticbool amdgpu_res_copyable(struct amdgpu_device *adev, struct ttm_resource *mem)
{ if (!amdgpu_res_cpu_visible(adev, mem)) returnfalse;
/* ttm_resource_ioremap only supports contiguous memory */ if (mem->mem_type == TTM_PL_VRAM &&
!(mem->placement & TTM_PL_FLAG_CONTIGUOUS)) returnfalse;
returntrue;
}
/* * amdgpu_bo_move - Move a buffer object to a new memory location * * Called by ttm_bo_handle_move_mem()
*/ staticint amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem, struct ttm_place *hop)
{ struct amdgpu_device *adev; struct amdgpu_bo *abo; struct ttm_resource *old_mem = bo->resource; int r;
if (new_mem->mem_type == TTM_PL_TT ||
new_mem->mem_type == AMDGPU_PL_PREEMPT) {
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); if (r) return r;
}
if (bo->type == ttm_bo_type_device &&
new_mem->mem_type == TTM_PL_VRAM &&
old_mem->mem_type != TTM_PL_VRAM) { /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU * accesses the BO after it's moved.
*/
abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
}
amdgpu_bo_move_notify(bo, evict, new_mem); if (adev->mman.buffer_funcs_enabled)
r = amdgpu_move_blit(bo, evict, new_mem, old_mem); else
r = -ENODEV;
if (r) { /* Check that all memory is CPU accessible */ if (!amdgpu_res_copyable(adev, old_mem) ||
!amdgpu_res_copyable(adev, new_mem)) {
pr_err("Move buffer fallback to memcpy unavailable\n"); return r;
}
r = ttm_bo_move_memcpy(bo, ctx, new_mem); if (r) return r;
}
/* update statistics after the move */ if (evict)
atomic64_inc(&adev->num_evictions);
atomic64_add(bo->base.size, &adev->num_bytes_moved); return 0;
}
/* * amdgpu_ttm_io_mem_reserve - Reserve a block of memory during a fault * * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/ staticint amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
switch (mem->mem_type) { case TTM_PL_SYSTEM: /* system memory */ return 0; case TTM_PL_TT: case AMDGPU_PL_PREEMPT: break; case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
#ifdef CONFIG_DRM_AMDGPU_USERPTR /* * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user * memory and start HMM tracking CPU page table update * * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only * once afterwards to stop HMM tracking
*/ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, struct hmm_range **range)
{ struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); unsignedlong start = gtt->userptr; struct vm_area_struct *vma; struct mm_struct *mm; bool readonly; int r = 0;
/* Make sure get_user_pages_done() can cleanup gracefully */
*range = NULL;
mm = bo->notifier.mm; if (unlikely(!mm)) {
DRM_DEBUG_DRIVER("BO is not registered?\n"); return -EFAULT;
}
if (!mmget_not_zero(mm)) /* Happens during process shutdown */ return -ESRCH;
mmap_read_lock(mm);
vma = vma_lookup(mm, start); if (unlikely(!vma)) {
r = -EFAULT; goto out_unlock;
} if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
vma->vm_file)) {
r = -EPERM; goto out_unlock;
}
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
readonly, NULL, pages, range);
out_unlock:
mmap_read_unlock(mm); if (r)
pr_debug("failed %d to get user pages 0x%lx\n", r, start);
if (gtt && gtt->userptr && range)
amdgpu_hmm_range_get_pages_done(range);
}
/* * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change * Check if the pages backing this ttm range have been invalidated * * Returns: true if pages are still valid
*/ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, struct hmm_range *range)
{ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
/*
 * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
 *
 * Called by amdgpu_cs_list_validate(). This creates the page list
 * that backs user memory and will ultimately be mapped into the device
 * address space.
 */
void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	unsigned long i;

	/* Passing pages == NULL clears the whole page list */
	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i] = pages ? pages[i] : NULL;
}
/* * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages * * Called by amdgpu_ttm_backend_bind()
**/ staticint amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE; int r;
/* Allocate an SG array and squash pages into it */
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
(u64)ttm->num_pages << PAGE_SHIFT,
GFP_KERNEL); if (r) goto release_sg;
/* Map SG to device */
r = dma_map_sgtable(adev->dev, ttm->sg, direction, 0); if (r) goto release_sg_table;
/* convert SG to linear array of pages and dma addresses */
drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
ttm->num_pages);
/* double check that we don't free the table twice */ if (!ttm->sg || !ttm->sg->sgl) return;
/* unmap the pages mapped to the device */
dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
sg_free_table(ttm->sg);
}
/* * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ... * MQDn+CtrlStackn where n is the number of XCCs per partition. * pages_per_xcc is the size of one MQD+CtrlStack. The first page is MQD * and uses memory type default, UC. The rest of pages_per_xcc are * Ctrl stack and modify their memory type to NC.
*/ staticvoid amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev, struct ttm_tt *ttm, uint64_t flags)
{ struct amdgpu_ttm_tt *gtt = (void *)ttm;
uint64_t total_pages = ttm->num_pages; int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp);
uint64_t page_idx, pages_per_xcc; int i;
uint64_t ctrl_flags = AMDGPU_PTE_MTYPE_VG10(flags, AMDGPU_MTYPE_NC);
/* * amdgpu_ttm_alloc_gart - Make sure buffer object is accessible either * through AGP or GART aperture. * * If bo is accessible through AGP aperture, then use AGP aperture * to access bo; otherwise allocate logical space in GART aperture * and map bo to GART aperture.
*/ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm); struct ttm_placement placement; struct ttm_place placements; struct ttm_resource *tmp;
uint64_t addr, flags; int r;
if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET) return 0;
addr = amdgpu_gmc_agp_addr(bo); if (addr != AMDGPU_BO_INVALID_OFFSET) return 0;
/* * amdgpu_ttm_backend_unbind - Unbind GTT mapped pages * * Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and * ttm_tt_destroy().
*/ staticvoid amdgpu_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{ struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
/* if the pages have userptr pinning then clear that first */ if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
} elseif (ttm->sg && drm_gem_is_imported(gtt->gobj)) { struct dma_buf_attachment *attach;
if (gtt->usertask)
put_task_struct(gtt->usertask);
ttm_tt_fini(>t->ttm);
kfree(gtt);
}
/** * amdgpu_ttm_tt_create - Create a ttm_tt object for a given BO * * @bo: The buffer object to create a GTT ttm_tt object around * @page_flags: Page flags to be added to the ttm_tt object * * Called by ttm_tt_create().
*/ staticstruct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
uint32_t page_flags)
{ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_ttm_tt *gtt; enum ttm_caching caching;
gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); if (!gtt) return NULL;
/* allocate space for the uninitialized page entries */ if (ttm_sg_tt_init(>t->ttm, bo, page_flags, caching)) {
kfree(gtt); return NULL;
} return >t->ttm;
}
/*
 * amdgpu_ttm_tt_populate - Map GTT pages visible to the device
 *
 * Map the pages of a ttm_tt object to an address space visible
 * to the underlying device.
 */
static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
				  struct ttm_tt *ttm,
				  struct ttm_operation_ctx *ctx)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct ttm_pool *pool;
	pgoff_t i;
	int ret;

	/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
	if (gtt->userptr) {
		/* Only allocate the SG table; pages arrive via HMM later */
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;
		return 0;
	}

	/* Externally backed (e.g. dma-buf) ttm_tt needs no pool pages */
	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return 0;

	/* Prefer a per-partition pool when one has been assigned */
	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;
	ret = ttm_pool_alloc(pool, ttm, ctx);
	if (ret)
		return ret;

	/* Hook the pages into the device's address space mapping */
	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = bdev->dev_mapping;

	return 0;
}
/*
 * amdgpu_ttm_tt_unpopulate - unmap GTT pages and unpopulate page arrays
 *
 * Unmaps pages of a ttm_tt object from the device address space and
 * unpopulates the page array backing it.
 *
 * NOTE(review): upstream also tears down userptr SG state here before
 * touching the pages — that branch is not visible in this copy; confirm
 * against the full source.
 */
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
				     struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
	struct amdgpu_device *adev;
	struct ttm_pool *pool;
	pgoff_t i;

	/* Externally backed ttm_tt objects own none of these pages */
	if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
		return;

	/* Detach the pages from the device's address space mapping */
	for (i = 0; i < ttm->num_pages; ++i)
		ttm->pages[i]->mapping = NULL;

	adev = amdgpu_ttm_adev(bdev);

	/* Free back to the same pool the pages were allocated from */
	if (adev->mman.ttm_pools && gtt->pool_id >= 0)
		pool = &adev->mman.ttm_pools[gtt->pool_id];
	else
		pool = &adev->mman.bdev.pool;

	return ttm_pool_free(pool, ttm);
}
/** * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current * task * * @tbo: The ttm_buffer_object that contains the userptr * @user_addr: The returned value
*/ int amdgpu_ttm_tt_get_userptr(conststruct ttm_buffer_object *tbo,
uint64_t *user_addr)
{ struct amdgpu_ttm_tt *gtt;
/** * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current * task * * @bo: The ttm_buffer_object to bind this userptr to * @addr: The address in the current tasks VM space to use * @flags: Requirements of userptr object. * * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to * initialize GPU VM for a KFD process.
*/ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
uint64_t addr, uint32_t flags)
{ struct amdgpu_ttm_tt *gtt;
if (!bo->ttm) { /* TODO: We want a separate TTM object type for userptrs */
bo->ttm = amdgpu_ttm_tt_create(bo, 0); if (bo->ttm == NULL) return -ENOMEM;
}
/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
/* * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays inside an * address range for the current task. *
*/ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsignedlong start, unsignedlong end, unsignedlong *userptr)
{ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); unsignedlong size;
if (gtt == NULL || !gtt->userptr) returnfalse;
/* Return false if no part of the ttm_tt object lies within * the range
*/
size = (unsignedlong)gtt->ttm.num_pages * PAGE_SIZE; if (gtt->userptr > end || gtt->userptr + size <= start) returnfalse;
if (userptr)
*userptr = gtt->userptr; returntrue;
}
/* * amdgpu_ttm_tt_is_userptr - Have the pages backing by userptr?
*/ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{ struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
/** * amdgpu_ttm_tt_pde_flags - Compute PDE flags for ttm_tt object * * @ttm: The ttm_tt object to compute the flags for * @mem: The memory registry backing this ttm_tt object * * Figure out the flags to use for a VM PDE (Page Directory Entry).
*/
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
{
uint64_t flags = 0;
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
/**
 * amdgpu_ttm_tt_pte_flags - Compute PTE flags for ttm_tt object
 *
 * @adev: amdgpu_device pointer
 * @ttm: The ttm_tt object to compute the flags for
 * @mem: The memory registry backing this ttm_tt object
 *
 * Figure out the flags to use for a VM PTE (Page Table Entry).
 *
 * NOTE(review): @adev is unused in this copy; upstream also ORs in
 * adev->gart.gart_pte_flags here — confirm against the full source.
 */
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_resource *mem)
{
	/* Start from the PDE flags; PTEs add access permissions on top */
	uint64_t flags = amdgpu_ttm_tt_pde_flags(ttm, mem);

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}
/* * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer * object. * * Return true if eviction is sensible. Called by ttm_mem_evict_first() on * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until * it can find space for a new object and by ttm_bo_force_list_clean() which is * used to clean out a memory space.
*/ staticbool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, conststruct ttm_place *place)
{ struct dma_resv_iter resv_cursor; struct dma_fence *f;
if (!amdgpu_bo_is_amdgpu_bo(bo)) return ttm_bo_eviction_valuable(bo, place);
/* Swapout? */ if (bo->resource->mem_type == TTM_PL_SYSTEM) returntrue;
if (bo->type == ttm_bo_type_kernel &&
!amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo))) returnfalse;
/* If bo is a KFD BO, check if the bo belongs to the current process. * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully
*/
dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
DMA_RESV_USAGE_BOOKKEEP, f) { if (amdkfd_fence_check_mm(f, current->mm) &&
!(place->flags & TTM_PL_FLAG_CONTIGUOUS)) returnfalse;
}
/* Preemptible BOs don't own system resources managed by the * driver (pages, VRAM, GART space). They point to resources * owned by someone else (e.g. pageable memory in user mode * or a DMABuf). They are used in a preemptible context so we * can guarantee no deadlocks and good QoS in case of MMU * notifiers or DMABuf move notifiers from the resource owner.
*/ if (bo->resource->mem_type == AMDGPU_PL_PREEMPT) returnfalse;
if (bo->resource->mem_type == TTM_PL_TT &&
amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo))) returnfalse;
/** * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. * * @bo: The buffer object to read/write * @offset: Offset into buffer object * @buf: Secondary buffer to write/read from * @len: Length in bytes of access * @write: true if writing * * This is used to access VRAM that backs a buffer object via MMIO * access for debugging purposes.
*/ staticint amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, unsignedlong offset, void *buf, int len, int write)
{ struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); struct amdgpu_res_cursor cursor; int ret = 0;
if (bo->resource->mem_type != TTM_PL_VRAM) return -EIO;
if (amdgpu_device_has_timeouts_enabled(adev) &&
!amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write)) return len;
/* * reserve TMR memory at the top of VRAM which holds * IP Discovery data and is protected by PSP.
*/ staticint amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
{ struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; bool mem_train_support = false;
uint32_t reserve_size = 0; int ret;
if (adev->bios && !amdgpu_sriov_vf(adev)) { if (amdgpu_atomfirmware_mem_training_supported(adev))
mem_train_support = true; else
DRM_DEBUG("memory training does not support!\n");
}
/* * Query reserved tmr size through atom firmwareinfo for Sienna_Cichlid and onwards for all * the use cases (IP discovery/G6 memory training/profiling/diagnostic data.etc) * * Otherwise, fallback to legacy approach to check and reserve tmr block for ip * discovery data and G6 memory training data respectively
*/ if (adev->bios)
reserve_size =
amdgpu_atomfirmware_get_fw_reserved_fb_size(adev);
/*
 * amdgpu_ttm_init - Init the memory management (ttm) as well as various
 * gtt/vram related fields.
 *
 * This initializes all of the memory space pools that the TTM layer
 * will need such as the GTT space (system memory mapped to the device),
 * VRAM (on-board memory), and on-chip memories (GDS, GWS, OA) which
 * can be mapped per VMID.
 */
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	uint64_t gtt_size;
	int r;

	mutex_init(&adev->mman.gtt_window_lock);

	dma_set_max_seg_size(adev->dev, UINT_MAX);
	/* No others user of address space so set it to 0 */
	r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
			    adev_to_drm(adev)->anon_inode->i_mapping,
			    adev_to_drm(adev)->vma_offset_manager,
			    adev->need_swiotlb,
			    dma_addressing_limited(adev->dev));
	if (r) {
		dev_err(adev->dev,
			"failed initializing buffer object driver(%d).\n", r);
		return r;
	}

	r = amdgpu_ttm_pools_init(adev);
	if (r) {
		dev_err(adev->dev, "failed to init ttm pools(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;

	/* Initialize VRAM pool with all of VRAM divided into pages */
	r = amdgpu_vram_mgr_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed initializing VRAM heap.\n");
		return r;
	}

	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
#ifdef CONFIG_X86
	if (adev->gmc.xgmi.connected_to_cpu)
		adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);
	else if (adev->gmc.is_app_apu)
		DRM_DEBUG_DRIVER(
			"No need to ioremap when real vram size is 0\n");
	else
#endif
		adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
				adev->gmc.visible_vram_size);
#endif

	/*
	 * The reserved vram for firmware must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_fw_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * The reserved vram for driver must be pinned to the specified
	 * place on the VRAM, so reserve it early.
	 */
	r = amdgpu_ttm_drv_reserve_vram_init(adev);
	if (r)
		return r;

	/*
	 * only NAVI10 and onwards ASIC support for IP discovery.
	 * If IP discovery enabled, a block of memory should be
	 * reserved for IP discovery.
	 */
	if (adev->mman.discovery_bin) {
		r = amdgpu_ttm_reserve_tmr(adev);
		if (r)
			return r;
	}

	/* allocate memory as required for VGA
	 * This is used for VGA emulation and pre-OS scanout buffers to
	 * avoid display artifacts while transitioning between pre-OS
	 * and driver.
	 */
	if (!adev->gmc.is_app_apu) {
		r = amdgpu_bo_create_kernel_at(adev, 0,
					       adev->mman.stolen_vga_size,
					       &adev->mman.stolen_vga_memory,
					       NULL);
		if (r)
			return r;

		r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size,
					       adev->mman.stolen_extended_size,
					       &adev->mman.stolen_extended_memory,
					       NULL);
		if (r)
			return r;

		r = amdgpu_bo_create_kernel_at(adev,
					       adev->mman.stolen_reserved_offset,
					       adev->mman.stolen_reserved_size,
					       &adev->mman.stolen_reserved_memory,
					       NULL);
		if (r)
			return r;
	} else {
		DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n");
	}

	/* Compute GTT size, either based on TTM limit
	 * or whatever the user passed on module init.
	 */
	gtt_size = ttm_tt_pages_limit() << PAGE_SHIFT;
	if (amdgpu_gtt_size != -1) {
		uint64_t configured_size = (uint64_t)amdgpu_gtt_size << 20;

		drm_warn(&adev->ddev,
			 "Configuring gttsize via module parameter is deprecated, please use ttm.pages_limit\n");
		if (gtt_size != configured_size)
			drm_warn(&adev->ddev,
				 "GTT size has been set as %llu but TTM size has been set as %llu, this is unusual\n",
				 configured_size, gtt_size);

		gtt_size = configured_size;
	}

	/* Initialize GTT memory pool */
	r = amdgpu_gtt_mgr_init(adev, gtt_size);
	if (r) {
		dev_err(adev->dev, "Failed initializing GTT heap.\n");
		return r;
	}
	dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n",
		 (unsigned int)(gtt_size / (1024 * 1024)));

	/* APUs with less VRAM than GTT prefer placing BOs in GTT */
	if (adev->flags & AMD_IS_APU) {
		if (adev->gmc.real_vram_size < gtt_size)
			adev->apu_prefer_gtt = true;
	}

	/* Initialize doorbell pool on PCI BAR */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL,
				    adev->doorbell.size / PAGE_SIZE);
	if (r) {
		dev_err(adev->dev, "Failed initializing doorbell heap.\n");
		return r;
	}

	/* Create a doorbell page for kernel usages */
	r = amdgpu_doorbell_create_kernel_doorbells(adev);
	if (r) {
		dev_err(adev->dev, "Failed to initialize kernel doorbells.\n");
		return r;
	}

	/* Initialize preemptible memory pool */
	r = amdgpu_preempt_mgr_init(adev);
	if (r) {
		dev_err(adev->dev, "Failed initializing PREEMPT heap.\n");
		return r;
	}

	/* Initialize various on-chip memory pools */
	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
	if (r) {
		dev_err(adev->dev, "Failed initializing GDS heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size);
	if (r) {
		dev_err(adev->dev, "Failed initializing gws heap.\n");
		return r;
	}

	r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size);
	if (r) {
		dev_err(adev->dev, "Failed initializing oa heap.\n");
		return r;
	}

	/* Fallback BO for MMIO-based VRAM debug access if this fails */
	if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->mman.sdma_access_bo, NULL,
				    &adev->mman.sdma_access_ptr))
		DRM_WARN("Debug VRAM access will use slowpath MM access\n");

	return 0;
}
/* * amdgpu_ttm_fini - De-initialize the TTM memory pools
*/ void amdgpu_ttm_fini(struct amdgpu_device *adev)
{ int idx;
if (!adev->mman.initialized) return;
amdgpu_ttm_pools_fini(adev);
amdgpu_ttm_training_reserve_vram_fini(adev); /* return the stolen vga memory back to VRAM */ if (!adev->gmc.is_app_apu) {
amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); /* return the FW reserved memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL,
NULL);
amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL,
NULL); if (adev->mman.stolen_reserved_size)
amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
NULL, NULL);
}
amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL,
&adev->mman.sdma_access_ptr);
amdgpu_ttm_fw_reserve_vram_fini(adev);
amdgpu_ttm_drv_reserve_vram_fini(adev);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
if (adev->mman.aper_base_kaddr)
iounmap(adev->mman.aper_base_kaddr);
adev->mman.aper_base_kaddr = NULL;
/** * amdgpu_ttm_set_buffer_funcs_status - enable/disable use of buffer functions * * @adev: amdgpu_device pointer * @enable: true when we can use buffer functions. * * Enable/disable use of buffer functions during suspend/resume. This should * only be called at bootup or when userspace isn't running.
*/ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
{ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
uint64_t size; int r;
if (enable) { struct amdgpu_ring *ring; struct drm_gpu_scheduler *sched;
ring = adev->mman.buffer_funcs_ring;
sched = &ring->sched;
r = drm_sched_entity_init(&adev->mman.high_pr,
DRM_SCHED_PRIORITY_KERNEL, &sched,
1, NULL); if (r) {
dev_err(adev->dev, "Failed setting up TTM BO move entity (%d)\n",
r); return;
}
r = drm_sched_entity_init(&adev->mman.low_pr,
DRM_SCHED_PRIORITY_NORMAL, &sched,
1, NULL); if (r) {
dev_err(adev->dev, "Failed setting up TTM BO move entity (%d)\n",
r); goto error_free_entity;
}
} else {
drm_sched_entity_destroy(&adev->mman.high_pr);
drm_sched_entity_destroy(&adev->mman.low_pr);
dma_fence_put(man->move);
man->move = NULL;
}
/* this just adjusts TTM size idea, which sets lpfn to the correct value */ if (enable)
size = adev->gmc.real_vram_size; else
size = adev->gmc.visible_vram_size;
man->size = size;
adev->mman.buffer_funcs_enabled = enable;
/** * amdgpu_ttm_evict_resources - evict memory buffers * @adev: amdgpu device object * @mem_type: evicted BO's memory type * * Evicts all @mem_type buffers on the lru list of the memory type. * * Returns: * 0 for success or a negative error code on failure.
*/
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.