// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2012-2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. *
**************************************************************************/
/*
 * struct vmw_mob - Structure containing page table and metadata for a
 * Guest Memory OBject.
 *
 * @pt_bo: Buffer object holding the page table pages.
 * @num_pages: Number of pages that make up the page table.
 * @pt_level: The indirection level of the page table. 0-2.
 * @pt_root_page: DMA address of the level 0 page of the page table.
 * @id: Device id of the mob.
 */
struct vmw_mob {
	struct vmw_bo *pt_bo;
	unsigned long num_pages;	/* fixed: was the fused token "unsignedlong" */
	unsigned pt_level;
	dma_addr_t pt_root_page;
	uint32_t id;
};
/*
 * vmw_bo_unpin_unlocked - Reserve a TTM buffer object, unpin it, and
 * unreserve it again.
 *
 * @bo: Pointer to the buffer object to unpin.
 *
 * The reservation is non-interruptible and non-blocking (no_wait);
 * it is expected to always succeed in this context, hence the BUG_ON.
 */
static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
{
	/* fixed: declaration keywords were fused as "staticinlinevoid" */
	int ret = ttm_bo_reserve(bo, false, true, NULL);

	BUG_ON(ret != 0);
	ttm_bo_unpin(bo);
	ttm_bo_unreserve(bo);
}
/* * vmw_setup_otable_base - Issue an object table base setup command to * the device * * @dev_priv: Pointer to a device private structure * @type: Type of object table base * @offset Start of table offset into dev_priv::otable_bo * @otable Pointer to otable metadata; * * This function returns -ENOMEM if it fails to reserve fifo space, * and may block waiting for fifo space.
*/ staticint vmw_setup_otable_base(struct vmw_private *dev_priv,
SVGAOTableType type, struct ttm_buffer_object *otable_bo, unsignedlong offset, struct vmw_otable *otable)
{ struct {
SVGA3dCmdHeader header;
SVGA3dCmdSetOTableBase64 body;
} *cmd; struct vmw_mob *mob; conststruct vmw_sg_table *vsgt; struct vmw_piter iter; int ret;
/* * The device doesn't support this, But the otable size is * determined at compile-time, so this BUG shouldn't trigger * randomly.
*/
/*
 * NOTE(review): this function is truncated in this chunk — the code
 * that assigns `mob` (and the command build/submit that should follow)
 * is not visible here, so `mob` appears to be read uninitialized by the
 * BUG_ON below.  Recover the missing body before relying on this text.
 */
BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2);
/* * vmw_otables_setup - Set up guest backed memory object tables * * @dev_priv: Pointer to a device private structure * * Takes care of the device guest backed surface * initialization, by setting up the guest backed memory object tables. * Returns 0 on success and various error codes on failure. A successful return * means the object tables can be taken down using the vmw_otables_takedown * function.
*/ int vmw_otables_setup(struct vmw_private *dev_priv)
{ struct vmw_otable **otables = &dev_priv->otable_batch.otables; int ret;
if (has_sm4_context(dev_priv)) {
/* Duplicate the DX table template so the batch owns a mutable copy. */
*otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); if (!(*otables)) return -ENOMEM;
/*
 * NOTE(review): truncated in this chunk — the non-SM4 path, the batch
 * setup call and the function's closing brace are not visible here.
 */
/*
 * vmw_otables_takedown - Take down guest backed memory object tables
 *
 * @dev_priv: Pointer to a device private structure
 *
 * Tears down the guest memory object tables via the batch takedown
 * helper, then frees the otable metadata array.
 */
void vmw_otables_takedown(struct vmw_private *dev_priv)
{
	vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch);
	kfree(dev_priv->otable_batch.otables);
}
/* * vmw_mob_calculate_pt_pages - Calculate the number of page table pages * needed for a guest backed memory object. * * @data_pages: Number of data pages in the memory object buffer.
*/ staticunsignedlong vmw_mob_calculate_pt_pages(unsignedlong data_pages)
/* data_size: bytes covered by the buffer; tot_size accumulates the
 * page-table footprint per indirection level. */
{ unsignedlong data_size = data_pages * PAGE_SIZE; unsignedlong tot_size = 0;
/*
 * NOTE(review): truncated in this chunk — the loop that walks the
 * indirection levels and the return statement are not visible here.
 */
/* * vmw_mob_create - Create a mob, but don't populate it. * * @data_pages: Number of data pages of the underlying buffer object.
*/ struct vmw_mob *vmw_mob_create(unsignedlong data_pages)
/* Zero-initialized so pt_bo starts out NULL (i.e. unpopulated). */
{ struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL);
/*
 * NOTE(review): truncated in this chunk — the NULL check on the
 * allocation, the num_pages assignment and the return are not visible.
 */
/* * vmw_mob_pt_populate - Populate the mob pagetable * * @mob: Pointer to the mob the pagetable of which we want to * populate. * * This function allocates memory to be used for the pagetable. * Returns ENOMEM if memory resources aren't sufficient and may * cause TTM buffer objects to be swapped out.
*/ staticint vmw_mob_pt_populate(struct vmw_private *dev_priv, struct vmw_mob *mob)
{
/* Precondition: the mob must not already have a page-table BO. */
BUG_ON(mob->pt_bo != NULL);
/*
 * NOTE(review): truncated in this chunk — the buffer-object allocation
 * and page-table construction that should follow are not visible here.
 */
/** * vmw_mob_assign_ppn - Assign a value to a page table entry * * @addr: Pointer to pointer to page table entry. * @val: The page table entry * * Assigns a value to a page table entry pointed to by *@addr and increments * *@addr according to the page table entry size.
*/ #if (VMW_PPN_SIZE == 8) staticvoid vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
*((u64 *) *addr) = val >> PAGE_SHIFT;
*addr += 2;
} #else staticvoid vmw_mob_assign_ppn(u32 **addr, dma_addr_t val)
{
*(*addr)++ = val >> PAGE_SHIFT;
} #endif
/* * vmw_mob_build_pt - Build a pagetable * * @data_addr: Array of DMA addresses to the underlying buffer * object's data pages. * @num_data_pages: Number of buffer object data pages. * @pt_pages: Array of page pointers to the page table pages. * * Returns the number of page table pages actually used. * Uses atomic kmaps of highmem pages to avoid TLB thrashing.
*/ staticunsignedlong vmw_mob_build_pt(struct vmw_piter *data_iter, unsignedlong num_data_pages, struct vmw_piter *pt_iter)
/* pt_size: bytes of PPN entries needed; num_pt_pages: pages to hold them. */
{ unsignedlong pt_size = num_data_pages * VMW_PPN_SIZE; unsignedlong num_pt_pages = DIV_ROUND_UP(pt_size, PAGE_SIZE); unsignedlong pt_page;
u32 *addr, *save_addr; unsignedlong i; struct page *page;
/*
 * NOTE(review): truncated in this chunk — the loop that maps each page
 * table page and fills it with PPN entries is not visible here.
 */
/* * vmw_mob_destroy - Destroy a mob, unpopulating first if necessary. * * @mob: Pointer to a mob to destroy.
*/ void vmw_mob_destroy(struct vmw_mob *mob)
{ if (mob->pt_bo) {
vmw_bo_unpin_unlocked(&mob->pt_bo->tbo);
vmw_bo_unreference(&mob->pt_bo);
}
kfree(mob);
}
/*
 * vmw_mob_unbind - Hide a mob from the device.
 *
 * @dev_priv: Pointer to a device private.
 * @mob: Pointer to the mob to unbind.
 */
void vmw_mob_unbind(struct vmw_private *dev_priv,
		    struct vmw_mob *mob)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBMob body;
	} *cmd;
	int ret;
	/*
	 * Fix: only take &mob->pt_bo->tbo when pt_bo is non-NULL.  The old
	 * initializer dereferenced pt_bo unconditionally, which is undefined
	 * behavior for a NULL pt_bo and made the "if (bo)" guards below
	 * dead code.
	 */
	struct ttm_buffer_object *bo = mob->pt_bo ? &mob->pt_bo->tbo : NULL;

	if (bo) {
		ret = ttm_bo_reserve(bo, false, true, NULL);
		/*
		 * Noone else should be using this buffer.
		 */
		BUG_ON(ret != 0);
	}

	/*
	 * NOTE(review): `cmd` is declared but unused in the visible text;
	 * the DESTROY_GB_MOB command reserve/commit sequence appears to be
	 * missing from this chunk — confirm against the full source.
	 */

	if (bo) {
		vmw_bo_fence_single(bo, NULL);
		ttm_bo_unreserve(bo);
	}
	vmw_fifo_resource_dec(dev_priv);
}
/* * vmw_mob_bind - Make a mob visible to the device after first * populating it if necessary. * * @dev_priv: Pointer to a device private. * @mob: Pointer to the mob we're making visible. * @data_addr: Array of DMA addresses to the data pages of the underlying * buffer object. * @num_data_pages: Number of data pages of the underlying buffer * object. * @mob_id: Device id of the mob to bind * * This function is intended to be interfaced with the ttm_tt backend * code.
*/ int vmw_mob_bind(struct vmw_private *dev_priv, struct vmw_mob *mob, conststruct vmw_sg_table *vsgt, unsignedlong num_data_pages,
int32_t mob_id)
{ int ret; bool pt_set_up = false; struct vmw_piter data_iter; struct {
SVGA3dCmdHeader header;
SVGA3dCmdDefineGBMob64 body;
} *cmd;
mob->id = mob_id;
/* An empty scatter/gather table means nothing to bind: succeed early. */
vmw_piter_start(&data_iter, vsgt, 0); if (unlikely(!vmw_piter_next(&data_iter))) return 0;
if (likely(num_data_pages == 1)) {
/* Single data page: no page table needed, the data page is the root. */
mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
} elseif (unlikely(mob->pt_bo == NULL)) {
/* Multiple pages and no page table yet: build one now. */
ret = vmw_mob_pt_populate(dev_priv, mob); if (unlikely(ret != 0)) return ret;
/*
 * NOTE(review): truncated in this chunk — the pagetable setup
 * (pt_set_up), the DEFINE_GB_MOB64 command submission and the
 * function's closing brace are not visible here.
 */
/*
 * NOTE(review): the following text is extraction residue from a German
 * code-listing website's disclaimer, not part of the driver source.  It is
 * preserved here in translation, commented out so it no longer breaks the
 * C syntax of the file:
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge.  However, neither completeness, correctness, nor
 * quality of the provided information is guaranteed.  Note: the colored
 * syntax display and the measurement are still experimental."
 */