// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/
/** * vmw_dummy_query_bo_create - create a bo to hold a dummy query result * * @dev_priv: A device private structure. * * This function creates a small buffer object that holds the query * result for dummy queries emitted as query barriers. * The function will then map the first page and initialize a pending * occlusion query result structure, Finally it will unmap the buffer. * No interruptible waits are done within this function. * * Returns an error if bo creation or initialization fails.
*/ staticint vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{ int ret; struct vmw_bo *vbo; struct ttm_bo_kmap_obj map; volatile SVGA3dQueryResult *result; bool dummy; struct vmw_bo_params bo_params = {
.domain = VMW_BO_DOMAIN_SYS,
.busy_domain = VMW_BO_DOMAIN_SYS,
.bo_type = ttm_bo_type_kernel,
.size = PAGE_SIZE,
.pin = true,
.keep_resv = true,
};
/* * Create the vbo as pinned, so that a tryreserve will * immediately succeed. This is because we're the only * user of the bo currently.
*/
ret = vmw_bo_create(dev_priv, &bo_params, &vbo); if (unlikely(ret != 0)) return ret;
ret = ttm_bo_kmap(&vbo->tbo, 0, 1, &map); if (likely(ret == 0)) {
result = ttm_kmap_obj_virtual(&map, &dummy);
result->totalSize = sizeof(*result);
result->state = SVGA3D_QUERYSTATE_PENDING;
result->result32 = 0xff;
ttm_bo_kunmap(&map);
}
vmw_bo_pin_reserved(vbo, false);
ttm_bo_unreserve(&vbo->tbo);
/** * vmw_request_device_late - Perform late device setup * * @dev_priv: Pointer to device private. * * This function performs setup of otables and enables large command * buffer submission. These tasks are split out to a separate function * because it reverts vmw_release_device_early and is intended to be used * by an error path in the hibernation code.
*/ staticint vmw_request_device_late(struct vmw_private *dev_priv)
{ int ret;
if (dev_priv->has_mob) {
ret = vmw_otables_setup(dev_priv); if (unlikely(ret != 0)) {
DRM_ERROR("Unable to initialize " "guest Memory OBjects.\n"); return ret;
}
}
if (dev_priv->cman) {
ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096); if (ret) { struct vmw_cmdbuf_man *man = dev_priv->cman;
/**
 * vmw_request_device - Bring up device and command submission
 *
 * @dev_priv: Pointer to device private.
 *
 * Initializes the device, enables the fence FIFO and tries to create a
 * command buffer manager (falling back to legacy SM if that fails).
 * Then performs late setup (otables, large command buffers) and creates
 * the dummy query buffer object. On failure, everything set up so far is
 * torn down again in reverse order via the goto-cleanup labels.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_request_device(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_device_init(dev_priv);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Unable to initialize the device.\n");
		return ret;
	}
	vmw_fence_fifo_up(dev_priv->fman);
	dev_priv->cman = vmw_cmdbuf_man_create(dev_priv);
	if (IS_ERR(dev_priv->cman)) {
		/* No command buffer manager: fall back to legacy submission. */
		dev_priv->cman = NULL;
		dev_priv->sm_type = VMW_SM_LEGACY;
	}

	ret = vmw_request_device_late(dev_priv);
	if (ret)
		goto out_no_mob;

	ret = vmw_dummy_query_bo_create(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_query_bo;

	return 0;

out_no_query_bo:
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);
	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
	vmw_fence_fifo_down(dev_priv->fman);
	vmw_device_fini(dev_priv);
	return ret;
}
/**
 * vmw_release_device_early - Early part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the first part of command submission takedown, to be called before
 * buffer management is taken down.
 */
static void vmw_release_device_early(struct vmw_private *dev_priv)
{
	/*
	 * Previous destructions should've released
	 * the pinned bo.
	 */
	BUG_ON(dev_priv->pinned_bo != NULL);

	vmw_bo_unreference(&dev_priv->dummy_query_bo);
	if (dev_priv->cman)
		vmw_cmdbuf_remove_pool(dev_priv->cman);

	if (dev_priv->has_mob) {
		struct ttm_resource_manager *man;

		man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB);
		/* Evict all MOB buffers before the otables go away. */
		ttm_resource_manager_evict_all(&dev_priv->bdev, man);
		vmw_otables_takedown(dev_priv);
	}
}
/**
 * vmw_release_device_late - Late part of fifo takedown.
 *
 * @dev_priv: Pointer to device private struct.
 *
 * This is the last part of the command submission takedown, to be called when
 * command submission is no longer needed. It may wait on pending fences.
 */
static void vmw_release_device_late(struct vmw_private *dev_priv)
{
	vmw_fence_fifo_down(dev_priv->fman);
	if (dev_priv->cman)
		vmw_cmdbuf_man_destroy(dev_priv->cman);

	vmw_device_fini(dev_priv);
}
/* * Sets the initial_[width|height] fields on the given vmw_private. * * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then * clamping the value to fb_max_[width|height] fields and the * VMW_MIN_INITIAL_[WIDTH|HEIGHT]. * If the values appear to be invalid, set them to * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
*/ staticvoid vmw_get_initial_size(struct vmw_private *dev_priv)
{
uint32_t width;
uint32_t height;
/** * vmw_dma_select_mode - Determine how DMA mappings should be set up for this * system. * * @dev_priv: Pointer to a struct vmw_private * * This functions tries to determine what actions need to be taken by the * driver to make system pages visible to the device. * If this function decides that DMA is not possible, it returns -EINVAL. * The driver may then try to disable features of the device that require * DMA.
*/ staticint vmw_dma_select_mode(struct vmw_private *dev_priv)
{ staticconstchar *names[vmw_dma_map_max] = {
[vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
/* * When running with SEV we always want dma mappings, because * otherwise ttm tt pool pages will bounce through swiotlb running * out of available space.
*/ if (vmw_force_coherent || cc_platform_has(CC_ATTR_MEM_ENCRYPT))
dev_priv->map_mode = vmw_dma_alloc_coherent; elseif (vmw_restrict_iommu)
dev_priv->map_mode = vmw_dma_map_bind; else
dev_priv->map_mode = vmw_dma_map_populate;
/** * vmw_dma_masks - set required page- and dma masks * * @dev_priv: Pointer to struct drm-device * * With 32-bit we can only handle 32 bit PFNs. Optionally set that * restriction also for 64-bit systems.
*/ staticint vmw_dma_masks(struct vmw_private *dev_priv)
{ struct drm_device *dev = &dev_priv->drm; int ret = 0;
ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); if (sizeof(unsignedlong) == 4 || vmw_restrict_dma_mask) {
drm_info(&dev_priv->drm, "Restricting DMA addresses to 44 bits.\n"); return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
}
/* * This is approximate size of the vram, the exact size will only * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource * size will be equal to or bigger than the size reported by * SVGA_REG_VRAM_SIZE.
*/
drm_info(&dev->drm, "VRAM at %pa size is %llu KiB\n",
&dev->vram_start, (uint64_t)dev->vram_size / 1024);
if (!vmwgfx_supported(dev_priv)) {
vmw_disable_backdoor();
drm_err_once(&dev_priv->drm, "vmwgfx seems to be running on an unsupported hypervisor.");
drm_err_once(&dev_priv->drm, "This configuration is likely broken.");
drm_err_once(&dev_priv->drm, "Please switch to a supported graphics device to avoid problems.");
}
vmw_vkms_init(dev_priv);
ret = vmw_dma_select_mode(dev_priv); if (unlikely(ret != 0)) {
drm_info(&dev_priv->drm, "Restricting capabilities since DMA not available.\n");
refuse_dma = true; if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
drm_info(&dev_priv->drm, "Disabling 3D acceleration.\n");
}
drm_info(&dev_priv->drm, "MOB limits: max mob size = %u KiB, max mob pages = %u\n",
dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages);
ret = vmw_dma_masks(dev_priv); if (unlikely(ret != 0)) goto out_err0;
dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
drm_info(&dev_priv->drm, "Max GMR ids is %u\n",
(unsigned)dev_priv->max_gmr_ids);
drm_info(&dev_priv->drm, "Max number of GMR pages is %u\n",
(unsigned)dev_priv->max_gmr_pages);
}
drm_info(&dev_priv->drm, "Maximum display memory size is %llu KiB\n",
(uint64_t)dev_priv->max_primary_mem / 1024);
/* Need mmio memory to check for fifo pitchlock cap. */ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
!(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
!vmw_fifo_have_pitchlock(dev_priv)) {
ret = -ENOSYS;
DRM_ERROR("Hardware has no pitchlock\n"); goto out_err0;
}
/* * Enable VRAM, but initially don't use it until SVGA is enabled and * unhidden.
*/
ret = vmw_vram_manager_init(dev_priv); if (unlikely(ret != 0)) {
drm_err(&dev_priv->drm, "Failed initializing memory manager for VRAM.\n"); goto out_no_vram;
}
ret = vmw_devcaps_create(dev_priv); if (unlikely(ret != 0)) {
drm_err(&dev_priv->drm, "Failed initializing device caps.\n"); goto out_no_vram;
}
/* * "Guest Memory Regions" is an aperture like feature with * one slot per bo. There is an upper limit of the number of * slots as well as the bo size.
*/
dev_priv->has_gmr = true; /* TODO: This is most likely not correct */ if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
refuse_dma ||
vmw_gmrid_man_init(dev_priv, VMW_PL_GMR) != 0) {
drm_info(&dev_priv->drm, "No GMR memory available. " "Graphics memory resources are very limited.\n");
dev_priv->has_gmr = false;
}
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
dev_priv->has_mob = true;
if (vmw_gmrid_man_init(dev_priv, VMW_PL_MOB) != 0) {
drm_info(&dev_priv->drm, "No MOB memory available. " "3D will be disabled.\n");
dev_priv->has_mob = false;
} if (vmw_sys_man_init(dev_priv) != 0) {
drm_info(&dev_priv->drm, "No MOB page table memory available. " "3D will be disabled.\n");
dev_priv->has_mob = false;
}
}
if (dev_priv->has_mob && (dev_priv->capabilities & SVGA_CAP_DX)) { if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_DXCONTEXT))
dev_priv->sm_type = VMW_SM_4;
}
/* SVGA_CAP2_DX2 (DefineGBSurface_v3) is needed for SM4_1 support */ if (has_sm4_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX2)) { if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM41))
dev_priv->sm_type = VMW_SM_4_1; if (has_sm4_1_context(dev_priv) &&
(dev_priv->capabilities2 & SVGA_CAP2_DX3)) { if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_SM5)) {
dev_priv->sm_type = VMW_SM_5; if (vmw_devcap_get(dev_priv, SVGA3D_DEVCAP_GL43))
dev_priv->sm_type = VMW_SM_5_1X;
}
}
}
ret = vmw_kms_init(dev_priv); if (unlikely(ret != 0)) goto out_no_kms;
vmw_overlay_init(dev_priv);
ret = vmw_request_device(dev_priv); if (ret) goto out_no_fifo;
/**
 * vmw_master_set - Notify a newly arrived DRM master
 *
 * @dev: Pointer to the drm device.
 * @file_priv: The master's file private.
 * @from_open: True if the master is set as part of a device open.
 *
 * A master that regains control (rather than opening the device fresh)
 * may have missed display changes, so emit a hotplug event to make it
 * re-probe the layout.
 */
static void vmw_master_set(struct drm_device *dev,
			   struct drm_file *file_priv,
			   bool from_open)
{
	/*
	 * Inform a new master that the layout may have changed while
	 * it was gone.
	 */
	if (!from_open)
		drm_sysfs_hotplug_event(dev);
}
bool vmwgfx_supported(struct vmw_private *vmw)
{ #ifdefined(CONFIG_X86) return hypervisor_is_type(X86_HYPER_VMWARE); #elifdefined(CONFIG_ARM64) /* * On aarch64 only svga3 is supported
*/ return vmw->pci_id == VMWGFX_PCI_ID_SVGA3; #else
drm_warn_once(&vmw->drm, "vmwgfx is running on an unknown architecture."); returnfalse; #endif
}
/**
 * __vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 * Needs the reservation sem to be held in non-exclusive mode.
 */
static void __vmw_svga_enable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	/* Only enable once: the VRAM manager tracks whether SVGA is on. */
	if (!ttm_resource_manager_used(man)) {
		vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
		ttm_resource_manager_set_used(man, true);
	}
}
/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
	__vmw_svga_enable(dev_priv);
}
/** * __vmw_svga_disable - Disable SVGA mode and use of VRAM. * * @dev_priv: Pointer to device private struct. * Needs the reservation sem to be held in exclusive mode. * Will not empty VRAM. VRAM must be emptied by caller.
*/ staticvoid __vmw_svga_disable(struct vmw_private *dev_priv)
{ struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
/**
 * vmw_svga_disable - Disable SVGA_MODE, and use of VRAM. Keep the fifo
 * running.
 *
 * @dev_priv: Pointer to device private struct.
 * Will empty VRAM.
 */
void vmw_svga_disable(struct vmw_private *dev_priv)
{
	struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);

	/*
	 * Disabling SVGA will turn off device modesetting capabilities, so
	 * notify KMS about that so that it doesn't cache atomic state that
	 * isn't valid anymore, for example crtcs turned on.
	 * Strictly we'd want to do this under the SVGA lock (or an SVGA mutex),
	 * but vmw_kms_lost_device() takes the reservation sem and thus we'll
	 * end up with lock order reversal. Thus, a master may actually perform
	 * a new modeset just after we call vmw_kms_lost_device() and race with
	 * vmw_svga_disable(), but that should at worst cause atomic KMS state
	 * to be inconsistent with the device, causing modesetting problems.
	 *
	 */
	vmw_kms_lost_device(&dev_priv->drm);
	if (ttm_resource_manager_used(man)) {
		if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
			DRM_ERROR("Failed evicting VRAM buffers.\n");
		ttm_resource_manager_set_used(man, false);
		vmw_write(dev_priv, SVGA_REG_ENABLE,
			  SVGA_REG_ENABLE_HIDE |
			  SVGA_REG_ENABLE_ENABLE);
	}
}
switch (val) { case PM_HIBERNATION_PREPARE: /* * Take the reservation sem in write mode, which will make sure * there are no other processes holding a buffer object * reservation, meaning we should be able to evict all buffer * objects if needed. * Once user-space processes have been frozen, we can release * the lock again.
*/
dev_priv->suspend_locked = true; break; case PM_POST_HIBERNATION: case PM_POST_RESTORE: if (READ_ONCE(dev_priv->suspend_locked)) {
dev_priv->suspend_locked = false;
} break; default: break;
} return 0;
}
/* * No user-space processes should be running now.
*/
ret = vmw_kms_suspend(&dev_priv->drm); if (ret) {
DRM_ERROR("Failed to freeze modesetting.\n"); return ret;
}
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv); while (ttm_device_swapout(&dev_priv->bdev, &ctx, GFP_KERNEL) > 0);
vmw_fifo_resource_dec(dev_priv); if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
DRM_ERROR("Can't hibernate while 3D resources are active.\n");
vmw_fifo_resource_inc(dev_priv);
WARN_ON(vmw_request_device_late(dev_priv));
dev_priv->suspend_locked = false; if (dev_priv->suspend_state)
vmw_kms_resume(dev); return -EBUSY;
}
/* Module metadata for the standalone vmwgfx DRM driver. */
MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
__stringify(VMWGFX_DRIVER_MINOR) "."
__stringify(VMWGFX_DRIVER_PATCHLEVEL) "." "0");