/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. *
**************************************************************************/ /* * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
/*
 * Allocates a ttm structure for the given BO.
 */
int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_device *bdev = bo->bdev;
	struct drm_device *ddev = bo->base.dev;
	uint32_t page_flags = 0;

	/* Caller must hold the BO's reservation lock. */
	dma_resv_assert_held(bo->base.resv);

	/* Nothing to do if a ttm is already attached. */
	if (bo->ttm)
		return 0;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
		break;
	case ttm_bo_type_kernel:
		break;
	case ttm_bo_type_sg:
		page_flags |= TTM_TT_FLAG_EXTERNAL;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		return -EINVAL;
	}

	/*
	 * When using dma_alloc_coherent with memory encryption the
	 * mapped TT pages need to be decrypted or otherwise the drivers
	 * will end up sending encrypted mem to the gpu.
	 */
	if (bdev->pool.use_dma_alloc &&
	    cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
		page_flags |= TTM_TT_FLAG_DECRYPTED;
		drm_info_once(ddev, "TT memory decryption enabled.");
	}

	/* Delegate the actual ttm allocation to the driver. */
	bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
	if (unlikely(bo->ttm == NULL))
		return -ENOMEM;

	return 0;
}
/*
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Return: 0 on success, -ENOMEM if the page array could not be allocated.
 */
static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	/* kvcalloc zero-initializes, so all page pointers start out NULL. */
	ttm->pages = kvcalloc(ttm->num_pages, sizeof(void*), GFP_KERNEL);
	if (!ttm->pages)
		return -ENOMEM;

	return 0;
}
	/*
	 * NOTE(review): orphaned loop-body fragment — the enclosing function
	 * header is not visible in this chunk.  It reads pages back from a
	 * shmem swap space into ttm->pages, so presumably this belongs to a
	 * swap-in path — TODO confirm against the full file.
	 */
	for (i = 0; i < ttm->num_pages; ++i) {
		/* Fetch (and possibly fault in) page i from the shmem mapping. */
		from_page = shmem_read_mapping_page_gfp(swap_space, i,
							gfp_mask);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		/* Destination page must already have been allocated. */
		to_page = ttm->pages[i];
		if (unlikely(to_page == NULL)) {
			ret = -ENOMEM;
			goto out_err;
		}
/** * ttm_tt_backup() - Helper to back up a struct ttm_tt. * @bdev: The TTM device. * @tt: The struct ttm_tt. * @flags: Flags that govern the backup behaviour. * * Update the page accounting and call ttm_pool_shrink_tt to free pages * or back them up. * * Return: Number of pages freed or swapped out, or negative error code on * error.
*/ long ttm_tt_backup(struct ttm_device *bdev, struct ttm_tt *tt, conststruct ttm_backup_flags flags)
{ long ret;
if (WARN_ON(IS_ERR_OR_NULL(tt->backup))) return 0;
ret = ttm_pool_backup(&bdev->pool, tt, &flags); if (ret > 0) {
tt->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
tt->page_flags |= TTM_TT_FLAG_BACKED_UP;
}
return ret;
}
int ttm_tt_restore(struct ttm_device *bdev, struct ttm_tt *tt, conststruct ttm_operation_ctx *ctx)
{ int ret = ttm_pool_restore_and_alloc(&bdev->pool, tt, ctx);
if (ret) return ret;
tt->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
return 0;
}
EXPORT_SYMBOL(ttm_tt_restore);
/**
 * ttm_tt_swapout - swap out tt object
 *
 * @bdev: TTM device structure.
 * @ttm: The struct ttm_tt.
 * @gfp_flags: Flags to use for memory allocation.
 *
 * Swapout a TT object to a shmem_file, return number of pages swapped out or
 * negative error code.
 */
int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
		   gfp_t gfp_flags)
{
	/* Total byte size of the tt's backing pages. */
	loff_t size = (loff_t)ttm->num_pages << PAGE_SHIFT;
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	int i, ret;
	/*
	 * NOTE(review): the function body is truncated here in this chunk —
	 * only the local declarations are visible; the copy-out loop and
	 * return are missing from SOURCE.
	 */
/*
 * ttm_tt_populate - allocate (and account) the pages backing a struct ttm_tt.
 *
 * Adds the tt's pages to the global (and optionally dma32) accounting,
 * swaps out other BOs while the global limits are exceeded, then populates
 * via the driver hook or the default pool allocator.  On failure the
 * accounting is unwound.  Returns 0 on success or a negative error code.
 */
int ttm_tt_populate(struct ttm_device *bdev,
		    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
	int ret;

	if (!ttm)
		return -EINVAL;

	/* Already populated: nothing to do. */
	if (ttm_tt_is_populated(ttm))
		return 0;

	/*
	 * Externally backed tts are not counted against the global limits.
	 * Note: pages are accounted *before* the limit check below, so the
	 * error path must subtract them again.
	 */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_add(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}

	/* Swap out other BOs until we are back under the global limits. */
	while (atomic_long_read(&ttm_pages_allocated) > ttm_pages_limit ||
	       atomic_long_read(&ttm_dma32_pages_allocated) >
	       ttm_dma32_pages_limit) {
		ret = ttm_global_swapout(ctx, GFP_KERNEL);
		if (ret == 0)
			break;	/* Nothing left to swap out; proceed anyway. */
		if (ret < 0)
			goto error;
	}

	/* Prefer the driver's populate hook, fall back to the pool. */
	if (bdev->funcs->ttm_tt_populate)
		ret = bdev->funcs->ttm_tt_populate(bdev, ttm, ctx);
	else
		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
	if (ret)
		goto error;

	ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
	ttm->page_flags &= ~TTM_TT_FLAG_BACKED_UP;
	/* If the tt was previously swapped out, read its contents back in. */
	if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			/* Unpopulate also unwinds the page accounting. */
			ttm_tt_unpopulate(bdev, ttm);
			return ret;
		}
	}

	return 0;

error:
	/* Undo the accounting performed above. */
	if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
		atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
		if (bdev->pool.use_dma32)
			atomic_long_sub(ttm->num_pages,
					&ttm_dma32_pages_allocated);
	}
	return ret;
}
/* Test the shrinker functions and dump the result */
/*
 * NOTE(review): "staticint" below is a fused token and should read
 * "static int"; left byte-identical here because the function body is
 * truncated in this chunk (only the ctx declaration is visible) and
 * cannot be safely reconstructed.
 */
staticint ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
{
	/* No interruptible waits, no GPU-memory reservation allowed. */
	struct ttm_operation_ctx ctx = { false, false };
/* * ttm_tt_mgr_init - register with the MM shrinker * * Register with the MM shrinker for swapping out BOs.
*/ void ttm_tt_mgr_init(unsignedlong num_pages, unsignedlong num_dma32_pages)
{ #ifdef CONFIG_DEBUG_FS
debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
&ttm_tt_debugfs_shrink_fops); #endif
if (!ttm_pages_limit)
ttm_pages_limit = num_pages;
if (!ttm_dma32_pages_limit)
ttm_dma32_pages_limit = num_dma32_pages;
}
/**
 * ttm_tt_setup_backup() - Allocate and assign a backup structure for a ttm_tt
 * @tt: The ttm_tt for which to allocate and assign a backup structure.
 *
 * Assign a backup structure to be used for tt backup. This should
 * typically be done at bo creation, to avoid allocations at shrinking
 * time.
 *
 * Return: 0 on success, negative error code on failure.
 */
int ttm_tt_setup_backup(struct ttm_tt *tt)
{
	/* Shmem-backed object sized to hold every page of the tt. */
	struct file *backup =
		ttm_backup_shmem_create(((loff_t)tt->num_pages) << PAGE_SHIFT);

	/* Backup only makes sense for externally mappable tts. */
	if (WARN_ON_ONCE(!(tt->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)))
		return -EINVAL;
	/*
	 * NOTE(review): the function body is truncated here in this chunk —
	 * the error check on 'backup' and its assignment to the tt are
	 * missing from SOURCE.
	 */
/*
 * NOTE(review): the following text is German website boilerplate that was
 * accidentally appended to this source file during extraction; it is not
 * part of the kernel source and should be removed.  English translation:
 * "The information on this web page has been carefully compiled to the
 * best of our knowledge.  However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.  Remark: the
 * colored syntax highlighting and the measurement are still experimental."
 */