/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/**
 * struct xe_res_cursor - state for walking over dma mapping, vram_mgr,
 * stolen_mgr, and gtt_mgr allocations
 */
struct xe_res_cursor {
	/** @start: Start of cursor */
	u64 start;
	/** @size: Size of the current segment. */
	u64 size;
	/** @remaining: Remaining bytes in cursor */
	u64 remaining;
	/** @node: Opaque pointer to the current node of the cursor */
	void *node;
	/** @mem_type: Memory type */
	u32 mem_type;
	/** @sgl: Scatterlist for cursor */
	struct scatterlist *sgl;
	/** @dma_addr: Current element in a struct drm_pagemap_device_addr array */
	const struct drm_pagemap_device_addr *dma_addr;
	/** @mm: Buddy allocator for VRAM cursor */
	struct drm_buddy *mm;
	/**
	 * @dma_start: DMA start address for the current segment.
	 * This may be different to @dma_addr.addr since elements in
	 * the array may be coalesced to a single segment.
	 */
	u64 dma_start;
	/** @dma_seg_size: Size of the current DMA segment. */
	u64 dma_seg_size;
};
/**
 * xe_res_first - initialize a xe_res_cursor
 *
 * @res: TTM resource object to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations between @start and @size.
 */
static inline void xe_res_first(struct ttm_resource *res,
				u64 start, u64 size,
				struct xe_res_cursor *cur)
{
	cur->sgl = NULL;
	cur->dma_addr = NULL;
	if (!res)
		goto fallback;

	XE_WARN_ON(start + size > res->size);

	cur->mem_type = res->mem_type;

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1: {
		struct drm_buddy_block *block;
		struct list_head *head, *next;
		struct drm_buddy *mm = xe_res_get_buddy(res);

		head = &to_xe_ttm_vram_mgr_resource(res)->blocks;

		block = list_first_entry_or_null(head,
						 struct drm_buddy_block,
						 link);
		if (!block)
			goto fallback;

		/* Skip whole blocks until @start lands inside @block. */
		while (start >= drm_buddy_block_size(mm, block)) {
			start -= drm_buddy_block_size(mm, block);

			next = block->link.next;
			if (next != head)
				block = list_entry(next, struct drm_buddy_block,
						   link);
		}

		/*
		 * NOTE(review): the remainder of this function was missing
		 * from this copy (extraction truncation); restored to match
		 * upstream xe_res_cursor.h -- verify against the kernel tree.
		 */
		cur->mm = mm;
		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(mm, block) - start,
				size);
		cur->remaining = size;
		cur->node = block;
		break;
	}
	default:
		goto fallback;
	}

	return;

fallback:
	cur->start = start;
	cur->size = size;
	cur->remaining = size;
	cur->node = NULL;
	cur->mem_type = XE_PL_TT;
	XE_WARN_ON(res && start + size > res->size);
}
/**
 * xe_res_first_sg - initialize a xe_res_cursor with a scatter gather table
 *
 * @sg: scatter gather table to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations between @start and @size.
 */
static inline void xe_res_first_sg(const struct sg_table *sg,
				   u64 start, u64 size,
				   struct xe_res_cursor *cur)
{
	XE_WARN_ON(!sg);
	cur->node = NULL;
	cur->start = start;
	cur->remaining = size;
	cur->size = 0;
	cur->dma_addr = NULL;
	cur->sgl = sg->sgl;
	cur->mem_type = XE_PL_TT;
	/* Advance to the first segment covering @start. */
	__xe_res_sg_next(cur);
}
/**
 * xe_res_first_dma - initialize a xe_res_cursor with dma_addr array
 *
 * @dma_addr: struct drm_pagemap_device_addr array to walk
 * @start: Start of the range
 * @size: Size of the range
 * @cur: cursor object to initialize
 *
 * Start walking over the range of allocations between @start and @size.
 */
static inline void xe_res_first_dma(const struct drm_pagemap_device_addr *dma_addr,
				    u64 start, u64 size,
				    struct xe_res_cursor *cur)
{
	XE_WARN_ON(!dma_addr);
	XE_WARN_ON(!IS_ALIGNED(start, PAGE_SIZE) ||
		   !IS_ALIGNED(size, PAGE_SIZE));

	/*
	 * NOTE(review): the body below this point was missing from this copy
	 * (extraction truncation); restored to match upstream
	 * xe_res_cursor.h -- verify against the kernel tree.
	 */
	cur->node = NULL;
	cur->start = start;
	cur->remaining = size;
	cur->dma_seg_size = PAGE_SIZE << dma_addr->order;
	cur->dma_start = 0;
	cur->size = 0;
	cur->dma_addr = dma_addr;
	__xe_res_dma_next(cur);
	cur->sgl = NULL;
	cur->mem_type = XE_PL_TT;
}
/**
 * xe_res_next - advance the cursor
 *
 * @cur: the cursor to advance
 * @size: number of bytes to move forward
 *
 * Move the cursor @size bytes forward, walking to the next node if necessary.
 */
static inline void xe_res_next(struct xe_res_cursor *cur, u64 size)
{
	struct drm_buddy_block *block;
	struct list_head *next;
	u64 start;

	XE_WARN_ON(size > cur->remaining);

	cur->remaining -= size;
	if (!cur->remaining)
		return;

	/*
	 * NOTE(review): the body below this point was missing from this copy
	 * (extraction truncation); restored to match upstream
	 * xe_res_cursor.h -- verify against the kernel tree.
	 */
	if (cur->size > size) {
		/* Still within the current segment. */
		cur->size -= size;
		cur->start += size;
		return;
	}

	if (cur->dma_addr) {
		cur->start += size;
		__xe_res_dma_next(cur);
		return;
	}

	if (cur->sgl) {
		cur->start += size;
		__xe_res_sg_next(cur);
		return;
	}

	switch (cur->mem_type) {
	case XE_PL_STOLEN:
	case XE_PL_VRAM0:
	case XE_PL_VRAM1:
		start = size - cur->size;
		block = cur->node;

		next = block->link.next;
		block = list_entry(next, struct drm_buddy_block, link);

		/* Walk buddy blocks until @start lands inside @block. */
		while (start >= drm_buddy_block_size(cur->mm, block)) {
			start -= drm_buddy_block_size(cur->mm, block);

			next = block->link.next;
			block = list_entry(next, struct drm_buddy_block, link);
		}

		cur->start = drm_buddy_block_offset(block) + start;
		cur->size = min(drm_buddy_block_size(cur->mm, block) - start,
				cur->remaining);
		cur->node = block;
		break;
	default:
		return;
	}
}
/** * xe_res_dma - return dma address of cursor at current position * * @cur: the cursor to return the dma address from
*/ staticinline u64 xe_res_dma(conststruct xe_res_cursor *cur)
{ if (cur->dma_addr) return cur->dma_start + cur->start; elseif (cur->sgl) return sg_dma_address(cur->sgl) + cur->start; else return cur->start;
}
/** * xe_res_is_vram() - Whether the cursor current dma address points to * same-device VRAM * @cur: The cursor. * * Return: true iff the address returned by xe_res_dma() points to internal vram.
*/ staticinlinebool xe_res_is_vram(conststruct xe_res_cursor *cur)
{ if (cur->dma_addr) return cur->dma_addr->proto == XE_INTERCONNECT_VRAM;
switch (cur->mem_type) { case XE_PL_STOLEN: case XE_PL_VRAM0: case XE_PL_VRAM1: returntrue; default: break;
}
returnfalse;
} #endif