// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. *
**************************************************************************/
/** * __vmw_piter_non_sg_next: Helper functions to advance * a struct vmw_piter iterator. * * @viter: Pointer to the iterator. * * These functions return false if past the end of the list, * true otherwise. Functions are selected depending on the current * DMA mapping mode.
*/ staticbool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{ return ++(viter->i) < viter->num_pages;
}
/* Advance helper used for scatter-gather DMA modes; selected in vmw_piter_start. */
staticbool __vmw_piter_sg_next(struct vmw_piter *viter)
{ bool ret = __vmw_piter_non_sg_next(viter);
/* NOTE(review): the remainder of this function (its return statement and
 * closing brace) is missing from this view — apparently lost during text
 * extraction. Restore it from the original source rather than guessing. */
/** * vmw_piter_start - Initialize a struct vmw_piter. * * @viter: Pointer to the iterator to initialize * @vsgt: Pointer to a struct vmw_sg_table to initialize from * @p_offset: Pointer offset used to update current array position * * Note that we're following the convention of __sg_page_iter_start, so that * the iterator doesn't point to a valid page after initialization; it has * to be advanced one step first.
*/ void vmw_piter_start(struct vmw_piter *viter, conststruct vmw_sg_table *vsgt, unsignedlong p_offset)
{
viter->i = p_offset - 1;
viter->num_pages = vsgt->num_pages;
viter->pages = vsgt->pages; switch (vsgt->mode) { case vmw_dma_alloc_coherent:
viter->next = &__vmw_piter_non_sg_next;
viter->dma_address = &__vmw_piter_dma_addr;
viter->addrs = vsgt->addrs; break; case vmw_dma_map_populate: case vmw_dma_map_bind:
viter->next = &__vmw_piter_sg_next;
viter->dma_address = &__vmw_piter_sg_addr;
__sg_page_iter_start(&viter->iter.base, vsgt->sgt->sgl,
vsgt->sgt->orig_nents, p_offset); break; default:
BUG();
}
}
/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
staticvoid vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{ struct device *dev = vmw_tt->dev_priv->drm.dev;
/* NOTE(review): function body truncated by extraction — the actual unmap
 * call(s) and closing brace are missing from this view. */
/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_backend
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
staticint vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{ struct device *dev = vmw_tt->dev_priv->drm.dev;
/* NOTE(review): function body truncated by extraction — the mapping logic
 * and closing brace are missing from this view. */
/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct function for and make sure the TTM pages are
 * visible to the device. Allocate storage for the device mappings.
 * If a mapping has already been performed, indicated by the storage
 * pointer being non NULL, the function returns success.
 */
staticint vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{ struct vmw_private *dev_priv = vmw_tt->dev_priv; struct vmw_sg_table *vsgt = &vmw_tt->vsgt; int ret = 0;
/* NOTE(review): function body truncated by extraction — the mapping
 * dispatch and closing brace are missing from this view. */
/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	/* No sg table means no mappings were ever set up — nothing to do. */
	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		/* Clear the pointer so a second call becomes a NOP. */
		vmw_tt->vsgt.sgt = NULL;
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}
/**
 * vmw_bo_sg_table - Return a struct vmw_sg_table object for a
 * TTM buffer object
 *
 * @bo: Pointer to a struct ttm_buffer_object
 *
 * Returns a pointer to a struct vmw_sg_table object. The object should
 * not be freed after use.
 * Note that for the device addresses to be valid, the buffer object must
 * either be reserved or pinned.
 */
conststruct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo)
{ struct vmw_ttm_tt *vmw_tt =
container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
/* NOTE(review): function body truncated by extraction — the return
 * statement and closing brace are missing from this view. */
/* NOTE(review): orphaned fragment — the enclosing function header was lost
 * during extraction; the code below starts mid-body. Judging by the locals
 * (bo_mem, vmw_be, ttm) it looks like a TTM backend bind routine — confirm
 * against the original source before rebuilding. */
switch (bo_mem->mem_type) { case VMW_PL_GMR:
ret = vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
ttm->num_pages, vmw_be->gmr_id); break; case VMW_PL_MOB: if (unlikely(vmw_be->mob == NULL)) {
/* Lazily create the MOB on first bind. */
vmw_be->mob =
vmw_mob_create(ttm->num_pages); if (unlikely(vmw_be->mob == NULL)) return -ENOMEM;
}
ret = vmw_mob_bind(vmw_be->dev_priv, vmw_be->mob,
&vmw_be->vsgt, ttm->num_pages,
vmw_be->gmr_id); break; case VMW_PL_SYSTEM: /* Nothing to be done for a system bind */ break; default:
BUG();
}
vmw_be->bound = true; return ret;
}
/* NOTE(review): orphaned fragment — the enclosing function header was lost
 * during extraction; the code below starts mid-body. It appears to be an
 * io_mem_reserve-style helper (fills mem->bus for VRAM) — confirm against
 * the original source before rebuilding. */
switch (mem->mem_type) { case TTM_PL_SYSTEM: case VMW_PL_SYSTEM: case VMW_PL_GMR: case VMW_PL_MOB: return 0; case TTM_PL_VRAM:
mem->bus.offset = (mem->start << PAGE_SHIFT) +
dev_priv->vram_start;
mem->bus.is_iomem = true;
mem->bus.caching = ttm_cached; break; default: return -EINVAL;
} return 0;
}
/**
 * vmw_move_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to move.
 * @old_mem: The old memory where we move from
 * @new_mem: The struct ttm_resource indicating to what memory
 * region the move is taking place.
 *
 * Calls move_notify for all subsystems needing it.
 * (currently only resources).
 */
static void vmw_move_notify(struct ttm_buffer_object *bo,
			    struct ttm_resource *old_mem,
			    struct ttm_resource *new_mem)
{
	vmw_bo_move_notify(bo, new_mem);
	vmw_query_move_notify(bo, old_mem, new_mem);
}
/**
 * vmw_swap_notify - TTM move_notify_callback
 *
 * @bo: The TTM buffer object about to be swapped out.
 */
static void vmw_swap_notify(struct ttm_buffer_object *bo)
{
	vmw_bo_swap_notify(bo);
	/* Wait for the buffer to idle; the result is deliberately ignored. */
	(void) ttm_bo_wait(bo, false, false);
}
The information on this website has been carefully compiled to the best
of our knowledge. However, neither the completeness, nor the correctness,
nor the quality of the information provided is guaranteed.
Note:
The colored syntax highlighting and the measurement are still experimental.