/* * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA * Copyright 2020 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Christian König
*/
/*
 * NOTE(review): this span is the tail of a function whose signature lies
 * before this chunk (it matches the shape of upstream ttm_global_init());
 * several statements were collapsed onto shared lines by an extraction
 * artifact. Code left byte-identical; comments only.
 */
/* Limit the number of pages in the pool to about 50% of the total
 * system memory.
 */
num_pages = ((u64)si.totalram * si.mem_unit) >> PAGE_SHIFT;
num_pages /= 2;
/* But for DMA32 we limit ourselves to only use 2GiB maximum. */
num_dma32 = (u64)(si.totalram - si.totalhigh) * si.mem_unit
>> PAGE_SHIFT;
/* Clamp the DMA32 pool to 2GiB worth of pages. */
num_dma32 = min(num_dma32, 2UL << (30 - PAGE_SHIFT));
/* Retry without GFP_DMA32 on platforms where DMA32 is not available. */ if (unlikely(glob->dummy_read_page == NULL)) {
glob->dummy_read_page = alloc_page(__GFP_ZERO); if (unlikely(glob->dummy_read_page == NULL)) {
/* Even the plain-page retry failed: give up and unwind via out. */
ret = -ENOMEM; goto out;
}
/* NOTE(review): message wording looks inverted — this branch is the
 * retry WITHOUT GFP_DMA32; confirm the intended text before shipping. */
pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
}
/* Expose the global buffer-object count under the TTM debugfs root. */
debugfs_create_atomic_t("buffer_objects", 0444, ttm_debugfs_root,
&glob->bo_count);
/* Error unwind: tear down debugfs dir and drop the global use count. */
out: if (ret && ttm_debugfs_root)
debugfs_remove(ttm_debugfs_root); if (ret)
--ttm_glob_use_count;
mutex_unlock(&ttm_global_mutex); return ret;
}
/**
 * ttm_device_prepare_hibernation - move GTT BOs to shmem for hibernation.
 *
 * @bdev: A pointer to a struct ttm_device to prepare hibernation for.
 *
 * Repeatedly swaps out buffer objects until a pass makes no further
 * progress (returns 0) or an error is reported.
 *
 * Return: 0 on success, negative number on failure.
 */
int ttm_device_prepare_hibernation(struct ttm_device *bdev)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};
	int ret;

	/* A positive return means pages were freed; keep going until done. */
	for (;;) {
		ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL);
		if (ret <= 0)
			break;
	}
	return ret;
}
EXPORT_SYMBOL(ttm_device_prepare_hibernation);
/*
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the global::swap_lru list.
 *
 * Walks the global device list under the global mutex and asks each device
 * to swap out; a device that made progress is rotated to the list tail so
 * the load is spread across devices on subsequent calls.
 *
 * Return: number of pages swapped out (> 0), 0 if nothing could be swapped,
 * or a negative error code.
 *
 * NOTE(review): the original chunk had ttm_global_swapout's signature fused
 * with ttm_device_swapout's body (undeclared i/man/lret, uninitialized bdev)
 * and an EXPORT_SYMBOL for a different function; reconstructed per the
 * upstream implementation.
 */
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
{
	struct ttm_global *glob = &ttm_glob;
	struct ttm_device *bdev;
	int ret = 0;

	mutex_lock(&ttm_global_mutex);
	list_for_each_entry(bdev, &glob->device_list, device_list) {
		ret = ttm_device_swapout(bdev, ctx, gfp_flags);
		if (ret > 0) {
			/* Rotate for fairness on the next shrink attempt. */
			list_move_tail(&bdev->device_list, &glob->device_list);
			break;
		}
	}
	mutex_unlock(&ttm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(ttm_global_swapout);
/**
 * ttm_device_init
 *
 * @bdev: A pointer to a struct ttm_device to initialize.
 * @funcs: Function table for the device.
 * @dev: The core kernel device pointer for DMA mappings and allocations.
 * @mapping: The address space to use for this bo.
 * @vma_manager: A pointer to a vma manager.
 * @use_dma_alloc: If coherent DMA allocation API should be used.
 * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
 *
 * Initializes a struct ttm_device:
 * Returns:
 * !0: Failure.
 */
int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *funcs,
		    struct device *dev, struct address_space *mapping,
		    struct drm_vma_offset_manager *vma_manager,
		    bool use_dma_alloc, bool use_dma32)
{
	struct ttm_global *glob = &ttm_glob;
	int ret, nid;

	/*
	 * NOTE(review): the statements below disable and detach the system
	 * manager and drain the LRU lists — this looks like ttm_device_fini()
	 * teardown spliced under the init kernel-doc, and the body appears
	 * truncated (no closing brace in this chunk). Confirm against the
	 * upstream file; only the visible defects ("conststruct" missing a
	 * space and lru[0] vs lru[i]) are fixed here.
	 */
	man = ttm_manager_type(bdev, TTM_PL_SYSTEM);
	ttm_resource_manager_set_used(man, false);
	ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, NULL);

	spin_lock(&bdev->lru_lock);
	/* Check each priority level's list — the message prints index i. */
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
		if (list_empty(&man->lru[i]))
			pr_debug("Swap list %d was clean\n", i);
	spin_unlock(&bdev->lru_lock);
/*
 * NOTE(review): the following stray German webpage disclaimer was pasted
 * into this source file by mistake (it would not compile as C). Preserved
 * here in translation inside a comment; it should be removed entirely:
 * "The information on this website was carefully compiled to the best of
 * our knowledge. However, no guarantee is given of the completeness,
 * correctness, or quality of the information provided. Note: the colored
 * syntax highlighting and the measurement are still experimental."
 */