// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
**************************************************************************/
/** * vmw_surface_dma_size - Compute fifo size for a dma command. * * @srf: Pointer to a struct vmw_surface * * Computes the required size for a surface dma command for backup or * restoration of the surface represented by @srf.
*/ staticinline uint32_t vmw_surface_dma_size(conststruct vmw_surface *srf)
{ return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma);
}
/** * vmw_surface_define_size - Compute fifo size for a surface define command. * * @srf: Pointer to a struct vmw_surface * * Computes the required size for a surface define command for the definition * of the surface represented by @srf.
*/ staticinline uint32_t vmw_surface_define_size(conststruct vmw_surface *srf)
{ returnsizeof(struct vmw_surface_define) + srf->metadata.num_sizes * sizeof(SVGA3dSize);
}
/** * vmw_surface_destroy_size - Compute fifo size for a surface destroy command. * * Computes the required size for a surface destroy command for the destruction * of a hw surface.
*/ staticinline uint32_t vmw_surface_destroy_size(void)
{ returnsizeof(struct vmw_surface_destroy);
}
/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	/*
	 * Fill in the SVGA command header and body; the size reported in
	 * the header covers the body only, matching
	 * vmw_surface_destroy_size() which sizes the whole command.
	 */
	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}
/** * vmw_surface_define_encode - Encode a surface_define command. * * @srf: Pointer to a struct vmw_surface object. * @cmd_space: Pointer to memory area in which the commands should be encoded.
*/ staticvoid vmw_surface_define_encode(conststruct vmw_surface *srf, void *cmd_space)
{ struct vmw_surface_define *cmd = (struct vmw_surface_define *)
cmd_space; struct drm_vmw_size *src_size;
SVGA3dSize *cmd_size;
uint32_t cmd_len; int i;
cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
cmd->header.size = cmd_len;
cmd->body.sid = srf->res.id; /* * Downcast of surfaceFlags, was upcasted when received from user-space, * since driver internally stores as 64 bit. * For legacy surface define only 32 bit flag is supported.
*/
cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags;
cmd->body.format = srf->metadata.format; for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i];
for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) {
cmd_size->width = src_size->width;
cmd_size->height = src_size->height;
cmd_size->depth = src_size->depth;
}
}
/** * vmw_surface_dma_encode - Encode a surface_dma command. * * @srf: Pointer to a struct vmw_surface object. * @cmd_space: Pointer to memory area in which the commands should be encoded. * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents * should be placed or read from. * @to_surface: Boolean whether to DMA to the surface or from the surface.
*/ staticvoid vmw_surface_dma_encode(struct vmw_surface *srf, void *cmd_space, const SVGAGuestPtr *ptr, bool to_surface)
{
uint32_t i; struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space; conststruct SVGA3dSurfaceDesc *desc =
vmw_surface_get_desc(srf->metadata.format);
/*
 * NOTE(review): this copy of the function is truncated -- only the local
 * declarations survive; the per-size DMA command encoding loop and the
 * closing brace are missing. Restore the body from the upstream file
 * before building; do not guess it from the declarations above.
 */
/** * vmw_hw_surface_destroy - destroy a Device surface * * @res: Pointer to a struct vmw_resource embedded in a struct * vmw_surface. * * Destroys a the device surface associated with a struct vmw_surface if * any, and adjusts resource count accordingly.
*/ staticvoid vmw_hw_surface_destroy(struct vmw_resource *res)
{
/*
 * NOTE(review): the entire body of this function (and its closing brace)
 * is missing from this copy. Restore from the upstream file.
 */
/** * vmw_legacy_srf_create - Create a device surface as part of the * resource validation process. * * @res: Pointer to a struct vmw_surface. * * If the surface doesn't have a hw id. * * Returns -EBUSY if there wasn't sufficient device resources to * complete the validation. Retry after freeing up resources. * * May return other errors if the kernel is out of guest resources.
*/ staticint vmw_legacy_srf_create(struct vmw_resource *res)
{ struct vmw_private *dev_priv = res->dev_priv; struct vmw_surface *srf;
uint32_t submit_size;
uint8_t *cmd; int ret;
/*
 * NOTE(review): only the local declarations survive here; the id
 * allocation, fifo reservation, surface-define encoding and closing
 * brace are missing from this copy. Restore from the upstream file.
 */
/** * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface. * * @res: Pointer to a struct vmw_res embedded in a struct * vmw_surface. * @val_buf: Pointer to a struct ttm_validate_buffer containing * information about the backup buffer. * @bind: Boolean wether to DMA to the surface. * * Transfer backup data to or from a legacy surface as part of the * validation process. * May return other errors if the kernel is out of guest resources. * The backup buffer will be fenced or idle upon successful completion, * and if the surface needs persistent backup storage, the backup buffer * will also be returned reserved iff @bind is true.
*/ staticint vmw_legacy_srf_dma(struct vmw_resource *res, struct ttm_validate_buffer *val_buf, bool bind)
{
SVGAGuestPtr ptr; struct vmw_fence_obj *fence;
uint32_t submit_size; struct vmw_surface *srf = vmw_res_to_srf(res);
uint8_t *cmd; struct vmw_private *dev_priv = res->dev_priv;
/*
 * NOTE(review): the middle of this function is missing from this copy --
 * the fifo reservation, DMA command encoding/submission and the fence
 * creation were dropped, so `fence` below is used uninitialized as
 * written. Restore the body from the upstream file before building.
 */
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
return 0;
}
/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	/* Nothing to transfer if the backing store is clean. */
	if (res->guest_memory_dirty)
		return vmw_legacy_srf_dma(res, val_buf, true);

	return 0;
}
/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_res embedded in a struct
 * vmw_surface.
 * @readback: Readback - only true if dirty
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	int ret = 0;

	/* Only DMA the contents back when the caller requests readback. */
	if (readback)
		ret = vmw_legacy_srf_dma(res, val_buf, false);

	return ret;
}
/** * vmw_legacy_srf_destroy - Destroy a device surface as part of a * resource eviction process. * * @res: Pointer to a struct vmw_res embedded in a struct * vmw_surface.
*/ staticint vmw_legacy_srf_destroy(struct vmw_resource *res)
{ struct vmw_private *dev_priv = res->dev_priv;
uint32_t submit_size;
uint8_t *cmd;
BUG_ON(res->id == -1);
/* * Encode the dma- and surface destroy commands.
*/
/*
 * NOTE(review): the function is truncated here -- the fifo reservation,
 * command encoding, id release and closing brace are missing from this
 * copy. Restore from the upstream file.
 */
/** * vmw_surface_init - initialize a struct vmw_surface * * @dev_priv: Pointer to a device private struct. * @srf: Pointer to the struct vmw_surface to initialize. * @res_free: Pointer to a resource destructor used to free * the object.
*/ staticint vmw_surface_init(struct vmw_private *dev_priv, struct vmw_surface *srf, void (*res_free) (struct vmw_resource *res))
{ int ret; struct vmw_resource *res = &srf->res;
/*
 * NOTE(review): only the declarations survive here; the resource
 * initialization logic and closing brace are missing from this copy.
 * Restore from the upstream file.
 */
/** * vmw_user_surface_base_to_res - TTM base object to resource converter for * user visible surfaces * * @base: Pointer to a TTM base object * * Returns the struct vmw_resource embedded in a struct vmw_surface * for the user-visible object identified by the TTM base object @base.
*/ staticstruct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{ return &(container_of(base, struct vmw_user_surface,
prime.base)->srf.res);
}
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *surf = vmw_res_to_srf(res);
	struct vmw_user_surface *usrf =
		container_of(surf, struct vmw_user_surface, srf);

	/* Dirty tracking is expected to be gone before the destructor runs. */
	WARN_ON(res->dirty);

	if (usrf->master)
		drm_master_put(&usrf->master);

	/* Release the surface-owned allocations, then the object itself. */
	kfree(surf->offsets);
	kfree(surf->metadata.sizes);
	kfree(surf->snooper.image);
	ttm_prime_object_kfree(usrf, prime);
}
/**
 * vmw_user_surface_base_release - User visible surface TTM base object destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct vmw_user_surface *user_srf =
		container_of(*p_base, struct vmw_user_surface, prime.base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;

	/*
	 * Dumb buffers own the resource and they'll unref the
	 * resource themselves
	 */
	WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb);

	vmw_resource_unreference(&res);
}
/** * vmw_surface_destroy_ioctl - Ioctl function implementing * the user surface destroy functionality. * * @dev: Pointer to a struct drm_device. * @data: Pointer to data copied from / to user-space. * @file_priv: Pointer to a drm file private structure.
*/ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{ struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
/*
 * NOTE(review): the body of vmw_surface_destroy_ioctl is missing from
 * this copy. The lines below (prime fd-to-handle conversion, buffer
 * lookup, surface lookup and ref-add) appear to belong to a different
 * helper function whose signature and declarations were dropped --
 * `dev_priv`, `fd`, `handle`, `bo`, `user_srf` and `base` are not
 * declared in this scope as written. Restore from the upstream file.
 */
ret = drm_gem_prime_fd_to_handle(&dev_priv->drm, file_priv, fd, handle); if (ret) {
drm_warn(&dev_priv->drm, "Wasn't able to find user buffer for fd = %u.\n", fd); return ret;
}
ret = vmw_user_bo_lookup(file_priv, *handle, &bo); if (ret) {
drm_warn(&dev_priv->drm, "Wasn't able to lookup user buffer for handle = %u.\n", *handle); return ret;
}
user_srf = vmw_lookup_user_surface_for_buffer(dev_priv, bo, *handle); if (WARN_ON(!user_srf)) {
drm_warn(&dev_priv->drm, "User surface fd %d (handle %d) is null.\n", fd, *handle);
ret = -EINVAL; goto out;
}
base = &user_srf->prime.base;
ret = ttm_ref_object_add(tfile, base, NULL, false); if (ret) {
drm_warn(&dev_priv->drm, "Couldn't add an object ref for the buffer (%d).\n", *handle); goto out;
}
/*
 * NOTE(review): this fragment is the interior of a surface-reference
 * helper (presumably vmw_surface_handle_reference -- see the call at the
 * reference ioctl below); its signature, declarations and the preceding
 * statements are missing from this copy. `handle_type`, `u_handle`,
 * `handle`, `base_p`, `tfile`, `dev_priv`, `user_srf` and `file_priv`
 * are all undeclared in this scope as written. Restore from upstream.
 */
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); if (ret) return vmw_buffer_prime_to_surface_base(dev_priv,
file_priv,
u_handle,
&handle,
base_p);
} else {
handle = u_handle;
}
ret = -EINVAL;
base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); if (unlikely(!base)) {
VMW_DEBUG_USER("Could not find surface to reference.\n"); goto out_no_lookup;
}
if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) {
VMW_DEBUG_USER("Referenced object is not a surface.\n"); goto out_bad_resource;
} if (handle_type != DRM_VMW_HANDLE_PRIME) { bool require_exist = false;
/* Error out if we are unauthenticated primary */ if (drm_is_primary_client(file_priv) &&
!file_priv->authenticated) {
ret = -EACCES; goto out_bad_resource;
}
/* * Make sure the surface creator has the same * authenticating master, or is already registered with us.
*/ if (drm_is_primary_client(file_priv) &&
user_srf->master != file_priv->master)
require_exist = true;
if (unlikely(drm_is_render_client(file_priv)))
require_exist = true;
ret = ttm_ref_object_add(tfile, base, NULL, require_exist); if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n"); goto out_bad_resource;
}
}
*base_p = base; return 0;
out_bad_resource:
ttm_base_object_unref(&base);
out_no_lookup: if (handle_type == DRM_VMW_HANDLE_PRIME)
(void) ttm_ref_object_base_unref(tfile, handle);
return ret;
}
/** * vmw_surface_reference_ioctl - Ioctl function implementing * the user surface reference functionality. * * @dev: Pointer to a struct drm_device. * @data: Pointer to data copied from / to user-space. * @file_priv: Pointer to a drm file private structure.
*/ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{ struct vmw_private *dev_priv = vmw_priv(dev); union drm_vmw_surface_reference_arg *arg =
(union drm_vmw_surface_reference_arg *)data; struct drm_vmw_surface_arg *req = &arg->req; struct drm_vmw_surface_create_req *rep = &arg->rep; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_surface *srf; struct vmw_user_surface *user_srf; struct drm_vmw_size __user *user_sizes; struct ttm_base_object *base; int ret;
ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid,
req->handle_type, &base); if (unlikely(ret != 0)) return ret;
/*
 * NOTE(review): the assignments deriving `user_srf` and `srf` from
 * `base` appear to be missing from this copy -- `srf` is used
 * uninitialized below as written. Restore from the upstream file.
 */
/* Downcast of flags when sending back to user space */
rep->flags = (uint32_t)srf->metadata.flags;
rep->format = srf->metadata.format;
memcpy(rep->mip_levels, srf->metadata.mip_levels, sizeof(srf->metadata.mip_levels));
user_sizes = (struct drm_vmw_size __user *)(unsignedlong)
rep->size_addr;
if (user_sizes)
ret = copy_to_user(user_sizes, &srf->metadata.base_size, sizeof(srf->metadata.base_size)); if (unlikely(ret != 0)) {
VMW_DEBUG_USER("copy_to_user failed %p %u\n", user_sizes,
srf->metadata.num_sizes);
ttm_ref_object_base_unref(tfile, base->handle);
ret = -EFAULT;
}
/*
 * NOTE(review): the lines below do not belong to this ioctl -- they are
 * a stray fragment of a GB-surface creation path (fifo resource count,
 * surface id allocation, id-range check) whose enclosing function was
 * dropped from this copy. `res` is undeclared in this scope as written.
 */
vmw_fifo_resource_inc(dev_priv);
ret = vmw_resource_alloc_id(res); if (unlikely(ret != 0)) {
DRM_ERROR("Failed to allocate a surface id.\n"); goto out_no_id;
}
if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) {
ret = -EBUSY; goto out_no_fifo;
}
/** * vmw_gb_surface_define_internal - Ioctl function implementing * the user surface define functionality. * * @dev: Pointer to a struct drm_device. * @req: Request argument from user-space. * @rep: Response argument to user-space. * @file_priv: Pointer to a drm file private structure.
*/ staticint
vmw_gb_surface_define_internal(struct drm_device *dev, struct drm_vmw_gb_surface_create_ext_req *req, struct drm_vmw_gb_surface_create_rep *rep, struct drm_file *file_priv)
{ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_user_surface *user_srf; struct vmw_surface_metadata metadata = {0}; struct vmw_surface *srf; struct vmw_resource *res; struct vmw_resource *tmp; int ret = 0;
uint32_t backup_handle = 0;
SVGA3dSurfaceAllFlags svga3d_flags_64 =
SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits,
req->base.svga3d_flags);
/* array_size must be null for non-GL3 host. */ if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) {
VMW_DEBUG_USER("SM4 surface not supported.\n"); return -EINVAL;
}
/* Accumulate SM4.1-only feature violations, then reject once. */
if (!has_sm4_1_context(dev_priv)) { if (req->svga3d_flags_upper_32_bits != 0)
ret = -EINVAL;
if (req->base.multisample_count != 0)
ret = -EINVAL;
if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE)
ret = -EINVAL;
if (req->quality_level != SVGA3D_MS_QUALITY_NONE)
ret = -EINVAL;
if (ret) {
VMW_DEBUG_USER("SM4.1 surface not supported.\n"); return ret;
}
}
if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) {
VMW_DEBUG_USER("SM5 surface not supported.\n"); return -EINVAL;
}
/*
 * NOTE(review): the code populating `metadata` from the request (flags,
 * format, sizes, etc.) appears to be missing from this copy --
 * `svga3d_flags_64` is computed above but never consumed as written.
 */
/* Define a surface based on the parameters. */
ret = vmw_gb_surface_define(dev_priv, &metadata, &srf); if (ret != 0) {
VMW_DEBUG_USER("Failed to define surface.\n"); return ret;
}
/*
 * NOTE(review): the assignments deriving `user_srf` and `res` from
 * `srf` are missing from this copy -- `res` is used uninitialized
 * below as written. Restore from the upstream file.
 */
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle,
&res->guest_memory_bo); if (ret == 0) { if (res->guest_memory_bo->is_dumb) {
VMW_DEBUG_USER("Can't backup surface with a dumb buffer.\n");
vmw_user_bo_unref(&res->guest_memory_bo);
ret = -EINVAL; goto out_unlock;
} elseif (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) {
VMW_DEBUG_USER("Surface backup buffer too small.\n");
vmw_user_bo_unref(&res->guest_memory_bo);
ret = -EINVAL; goto out_unlock;
} else {
backup_handle = req->base.buffer_handle;
}
}
} elseif (req->base.drm_surface_flags &
(drm_vmw_surface_flag_create_buffer |
drm_vmw_surface_flag_coherent)) {
ret = vmw_gem_object_create_with_handle(dev_priv, file_priv,
res->guest_memory_size,
&backup_handle,
&res->guest_memory_bo);
}
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res); goto out_unlock;
}
/* Coherent surfaces need CPU dirty tracking on the backing buffer. */
if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) { struct vmw_bo *backup = res->guest_memory_bo;
ttm_bo_reserve(&backup->tbo, false, false, NULL); if (!res->func->dirty_alloc)
ret = -EINVAL; if (!ret)
ret = vmw_bo_dirty_add(backup); if (!ret) {
res->coherent = true;
ret = res->func->dirty_alloc(res);
}
ttm_bo_unreserve(&backup->tbo); if (ret) {
vmw_resource_unreference(&res); goto out_unlock;
}
}
if (res->guest_memory_bo) {
ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); if (unlikely(ret != 0)) {
vmw_resource_unreference(&res); goto out_unlock;
}
}
tmp = vmw_resource_reference(res);
ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime,
VMW_RES_SURFACE,
&vmw_user_surface_base_release);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
vmw_resource_unreference(&res); goto out_unlock;
}
/*
 * NOTE(review): the function is truncated here -- the reply fill-in,
 * the `out_unlock` label and the closing brace are missing from this
 * copy. Restore from the upstream file.
 */
/** * vmw_subres_dirty_add - Add a dirty region to a subresource * @dirty: The surfaces's dirty tracker. * @loc_start: The location corresponding to the start of the region. * @loc_end: The location corresponding to the end of the region. * * As we are assuming that @loc_start and @loc_end represent a sequential * range of backing store memory, if the region spans multiple lines then * regardless of the x coordinate, the full lines are dirtied. * Correspondingly if the region spans multiple z slices, then full rather * than partial z slices are dirtied.
*/ staticvoid vmw_subres_dirty_add(struct vmw_surface_dirty *dirty, conststruct vmw_surface_loc *loc_start, conststruct vmw_surface_loc *loc_end)
{ conststruct vmw_surface_cache *cache = &dirty->cache;
SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource];
u32 mip = loc_start->sub_resource % cache->num_mip_levels; conststruct drm_vmw_size *size = &cache->mip[mip].size;
u32 box_c2 = box->z + box->d;
if (WARN_ON(loc_start->sub_resource >= dirty->num_subres)) return;
if (box->d == 0 || box->z > loc_start->z)
box->z = loc_start->z; if (box_c2 < loc_end->z)
box->d = loc_end->z - box->z;
/*
 * NOTE(review): vmw_subres_dirty_add is truncated here -- the x/y box
 * extension logic and its closing brace are missing, and everything from
 * the multisample comment below belongs to the interior of a different
 * function (a texture dirty-range-add helper) whose signature and the
 * declarations of `sub_res`, `loc1` and `loc2` were dropped from this
 * copy. Restore both functions from the upstream file.
 */
/* * Multiple multisample sheets. To do this in an optimized * fashion, compute the dirty region for each sheet and the * resulting union. Since this is not a common case, just dirty * the whole surface.
*/ for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
vmw_subres_dirty_full(dirty, sub_res); return;
} if (loc1.sub_resource + 1 == loc2.sub_resource) { /* Dirty range covers a single sub-resource */
vmw_subres_dirty_add(dirty, &loc1, &loc2);
} else { /* Dirty range covers multiple sub-resources */ struct vmw_surface_loc loc_min, loc_max;
u32 sub_res;
/* * vmw_gb_surface_define - Define a private GB surface * * @dev_priv: Pointer to a device private. * @metadata: Metadata representing the surface to create. * @user_srf_out: allocated user_srf. Set to NULL on failure. * * GB surfaces allocated by this function will not have a user mode handle, and * thus will only be visible to vmwgfx. For optimization reasons the * surface may later be given a user mode handle by another function to make * it available to user mode drivers.
*/ int vmw_gb_surface_define(struct vmw_private *dev_priv, conststruct vmw_surface_metadata *req, struct vmw_surface **srf_out)
{ struct vmw_surface_metadata *metadata; struct vmw_user_surface *user_srf; struct vmw_surface *srf;
u32 sample_count = 1;
u32 num_layers = 1; int ret;
*srf_out = NULL;
if (req->scanout) { if (!vmw_surface_is_screen_target_format(req->format)) {
VMW_DEBUG_USER("Invalid Screen Target surface format."); return -EINVAL;
}
/*
 * NOTE(review): large parts of this function are missing from this copy
 * (format/size validation, user_srf allocation, metadata copy and
 * guest_memory_size computation) -- `metadata` and `srf` are used below
 * without ever being assigned as written. Restore from upstream.
 */
if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT)
srf->res.guest_memory_size += sizeof(SVGA3dDXSOState);
/* * Don't set SVGA3D_SURFACE_SCREENTARGET flag for a scanout surface with * size greater than STDU max width/height. This is really a workaround * to support creation of big framebuffer requested by some user-space * for whole topology. That big framebuffer won't really be used for * binding with screen target as during prepare_fb a separate surface is * created so it's safe to ignore SVGA3D_SURFACE_SCREENTARGET flag.
*/ if (dev_priv->active_display_unit == vmw_du_screen_target &&
metadata->scanout &&
metadata->base_size.width <= dev_priv->stdu_max_width &&
metadata->base_size.height <= dev_priv->stdu_max_height)
metadata->flags |= SVGA3D_SURFACE_SCREENTARGET;
/* * From this point, the generic resource management functions * destroy the object on failure.
*/
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
return ret;
out_unlock: return ret;
}
/*
 * vmw_format_bpp_to_svga - Map a dumb-buffer bits-per-pixel value to the
 * corresponding SVGA3D surface format.
 *
 * @vmw: Pointer to the device private (used to check SM4 support).
 * @bpp: Bits per pixel of the requested dumb buffer.
 *
 * Unsupported values are warned about and fall back to SVGA3D_X8R8G8B8.
 */
static SVGA3dSurfaceFormat vmw_format_bpp_to_svga(struct vmw_private *vmw,
						  int bpp)
{
	if (bpp == 8)	/* DRM_FORMAT_C8 */
		return SVGA3D_P8;

	if (bpp == 16)	/* DRM_FORMAT_RGB565 */
		return SVGA3D_R5G6B5;

	if (bpp == 32)	/* DRM_FORMAT_XRGB8888 */
		return has_sm4_context(vmw) ? SVGA3D_B8G8R8X8_UNORM :
					      SVGA3D_X8R8G8B8;

	drm_warn(&vmw->drm, "Unsupported format bpp: %d\n", bpp);
	return SVGA3D_X8R8G8B8;
}
/** * vmw_dumb_create - Create a dumb kms buffer * * @file_priv: Pointer to a struct drm_file identifying the caller. * @dev: Pointer to the drm device. * @args: Pointer to a struct drm_mode_create_dumb structure * Return: Zero on success, negative error code on failure. * * This is a driver callback for the core drm create_dumb functionality. * Note that this is very similar to the vmw_bo_alloc ioctl, except * that the arguments have a different format.
*/ int vmw_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args)
{ struct vmw_private *dev_priv = vmw_priv(dev); struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct vmw_bo *vbo = NULL; struct vmw_resource *res = NULL; union drm_vmw_gb_surface_create_ext_arg arg = { 0 }; struct drm_vmw_gb_surface_create_ext_req *req = &arg.req; int ret; struct drm_vmw_size drm_size = {
.width = args->width,
.height = args->height,
.depth = 1,
};
SVGA3dSurfaceFormat format = vmw_format_bpp_to_svga(dev_priv, args->bpp); conststruct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
SVGA3D_SURFACE_HINT_RENDERTARGET |
SVGA3D_SURFACE_SCREENTARGET;
if (vmw_surface_is_dx_screen_target_format(format)) {
flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
SVGA3D_SURFACE_BIND_RENDER_TARGET;
}
/* * Without mob support we're just going to use raw memory buffer * because we wouldn't be able to support full surface coherency * without mobs. There also no reason to support surface coherency * without 3d (i.e. gpu usage on the host) because then all the * contents is going to be rendered guest side.
*/ if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) { int cpp = DIV_ROUND_UP(args->bpp, 8);
switch (cpp) { case 1: /* DRM_FORMAT_C8 */ case 2: /* DRM_FORMAT_RGB565 */ case 4: /* DRM_FORMAT_XRGB8888 */ break; default: /* * Dumb buffers don't allow anything else. * This is tested via IGT's dumb_buffers
*/ return -EINVAL;
}
/*
 * NOTE(review): this function is truncated here -- the raw buffer
 * creation path, the surface-backed path and the closing braces are
 * missing from this copy. Restore from the upstream file.
 */
/*
 * NOTE(review): the German text below is stray non-source content (a web
 * page disclaimer) appended by whatever tool produced this copy; it is not
 * part of the driver and breaks compilation if left bare. Preserved here,
 * translated to English, pending removal:
 * "The information on this web page was compiled carefully and to the best
 * of our knowledge. However, neither completeness, correctness, nor quality
 * of the information provided is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */