/* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
**************************************************************************/
/* * FIXME: vmwgfx_drm.h needs to be last due to dependencies. * uapi headers should not depend on header files outside uapi/.
*/ #include <drm/vmwgfx_drm.h>
/** * struct vmw-resource - base class for hardware resources * * @kref: For refcounting. * @dev_priv: Pointer to the device private for this resource. Immutable. * @id: Device id. Protected by @dev_priv::resource_lock. * @guest_memory_size: Guest memory buffer size. Immutable. * @res_dirty: Resource contains data not yet in the guest memory buffer. * Protected by resource reserved. * @guest_memory_dirty: Guest memory buffer contains data not yet in the HW * resource. Protected by resource reserved. * @coherent: Emulate coherency by tracking vm accesses. * @guest_memory_bo: The guest memory buffer if any. Protected by resource * reserved. * @guest_memory_offset: Offset into the guest memory buffer if any. Protected * by resource reserved. Note that only a few resource types can have a * @guest_memory_offset different from zero. * @pin_count: The pin count for this resource. A pinned resource has a * pin-count greater than zero. It is not on the resource LRU lists and its * guest memory buffer is pinned. Hence it can't be evicted. * @func: Method vtable for this resource. Immutable. * @mob_node; Node for the MOB guest memory rbtree. Protected by * @guest_memory_bo reserved. * @lru_head: List head for the LRU list. Protected by @dev_priv::resource_lock. * @binding_head: List head for the context binding list. Protected by * the @dev_priv::binding_mutex * @res_free: The resource destructor. * @hw_destroy: Callback to destroy the resource on the device, as part of * resource destruction.
*/ struct vmw_bo; struct vmw_bo; struct vmw_resource_dirty; struct vmw_resource { struct kref kref; struct vmw_private *dev_priv; int id;
u32 used_prio; unsignedlong guest_memory_size;
u32 res_dirty : 1;
u32 guest_memory_dirty : 1;
u32 coherent : 1; struct vmw_bo *guest_memory_bo; unsignedlong guest_memory_offset; unsignedlong pin_count; conststruct vmw_res_func *func; struct rb_node mob_node; struct list_head lru_head; struct list_head binding_head; struct vmw_resource_dirty *dirty; void (*res_free) (struct vmw_resource *res); void (*hw_destroy) (struct vmw_resource *res);
};
/*
 * Resources that are managed using ioctls.
 */
enum vmw_res_type {
	vmw_res_context,
	vmw_res_surface,
	vmw_res_stream,
	vmw_res_shader,
	vmw_res_dx_context,
	vmw_res_cotable,
	vmw_res_view,
	vmw_res_streamoutput,
	vmw_res_max
};
/*
 * Resources that are managed using command streams.
 */
enum vmw_cmdbuf_res_type {
	vmw_cmdbuf_res_shader,
	vmw_cmdbuf_res_view,
	vmw_cmdbuf_res_streamoutput
};
/**
 * struct vmw_surface_metadata - Metadata describing a surface.
 *
 * @flags: Device flags.
 * @format: Surface SVGA3D_x format.
 * @mip_levels: Mip level for each face. For GB first index is used only.
 * @multisample_count: Sample count.
 * @multisample_pattern: Sample patterns.
 * @quality_level: Quality level.
 * @autogen_filter: Filter for automatically generated mipmaps.
 * @array_size: Number of array elements for a 1D/2D texture. For cubemap
 * texture number of faces * array_size. This should be 0 for pre SM4 device.
 * @num_sizes: Size of @sizes. For GB surface this should always be 1.
 * @buffer_byte_stride: Buffer byte stride.
 * @base_size: Surface dimension.
 * @sizes: Array representing mip sizes. Legacy only.
 * @scanout: Whether this surface will be used for scanout.
 *
 * This tracks metadata for both legacy and guest backed surface.
 */
struct vmw_surface_metadata {
	u64 flags;
	u32 format;
	u32 mip_levels[DRM_VMW_MAX_SURFACE_FACES];
	u32 multisample_count;
	u32 multisample_pattern;
	u32 quality_level;
	u32 autogen_filter;
	u32 array_size;
	u32 num_sizes;
	u32 buffer_byte_stride;
	struct drm_vmw_size base_size;
	struct drm_vmw_size *sizes;
	bool scanout;
};
/**
 * struct vmw_surface: Resource structure for a surface.
 *
 * @res: The base resource for this surface.
 * @metadata: Metadata for this surface resource.
 * @snooper: Cursor data. Legacy surface only.
 * @offsets: Legacy surface only.
 * @view_list: List of views bound to this surface.
 */
struct vmw_surface {
	struct vmw_resource res;
	struct vmw_surface_metadata metadata;
	struct vmw_cursor_snooper snooper;
	struct vmw_surface_offset *offsets;
	struct list_head view_list;
};
/**
 * struct vmw_res_cache_entry - resource information cache entry
 * @handle: User-space handle of a resource.
 * @res: Non-ref-counted pointer to the resource.
 * @private: Opaque per-entry pointer stored with the cached resource.
 *	NOTE(review): undocumented in the original header — confirm what the
 *	execbuf lookup path stores here.
 * @valid_handle: Whether the @handle member is valid.
 * @valid: Whether the entry is valid, which also implies that the execbuf
 * code holds a reference to the resource, and it's placed on the
 * validation list.
 *
 * Used to avoid frequent repeated user-space handle lookups of the
 * same resource.
 */
struct vmw_res_cache_entry {
	uint32_t handle;
	struct vmw_resource *res;
	void *private;
	unsigned short valid_handle;
	unsigned short valid;
};
/**
 * enum vmw_dma_map_mode - indicate how to perform TTM page dma mappings.
 */
enum vmw_dma_map_mode {
	vmw_dma_alloc_coherent, /* Use TTM coherent pages */
	vmw_dma_map_populate,	/* Unmap from DMA just after unpopulate */
	vmw_dma_map_bind,	/* Unmap from DMA just before unbind */
	vmw_dma_map_max
};
/**
 * struct vmw_sg_table - Scatter/gather table for binding, with additional
 * device-specific information.
 *
 * @mode: The DMA mapping mode in use (see enum vmw_dma_map_mode).
 * @pages: Array of page pointers backing the buffer, if used by @mode.
 * @addrs: Array of DMA addresses, used when coherent pages are employed.
 * @sgt: Pointer to a struct sg_table with binding information
 * @num_pages: Number of pages in the backing store.
 */
struct vmw_sg_table {
	enum vmw_dma_map_mode mode;
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_table *sgt;
	unsigned long num_pages;
};
/**
 * struct vmw_piter - Page iterator that iterates over a list of pages
 * and DMA addresses that could be either a scatter-gather list or
 * arrays
 *
 * @pages: Array of page pointers to the pages.
 * @addrs: DMA addresses to the pages if coherent pages are used.
 * @iter: Scatter-gather page iterator. Current position in SG list.
 * @i: Current position in arrays.
 * @num_pages: Number of pages total.
 * @next: Function to advance the iterator. Returns false if past the list
 * of pages, true otherwise.
 * @dma_address: Function to return the DMA address of the current page.
 */
struct vmw_piter {
	struct page **pages;
	const dma_addr_t *addrs;
	struct sg_dma_page_iter iter;
	unsigned long i;
	unsigned long num_pages;
	bool (*next)(struct vmw_piter *);
	dma_addr_t (*dma_address)(struct vmw_piter *);
};
/**
 * struct vmw_sw_context - Command submission context
 * @res_ht: Pointer hash table used to find validation duplicates
 * @kernel: Whether the command buffer originates from kernel code rather
 * than from user-space
 * @fp: If @kernel is false, points to the file of the client. Otherwise
 * NULL
 * @filp: If @kernel is false, the drm file of the client. Otherwise NULL.
 *	NOTE(review): undocumented in the original header — confirm.
 * @cmd_bounce: Command bounce buffer used for command validation before
 * copying to fifo space
 * @cmd_bounce_size: Current command bounce buffer size
 * @cur_query_bo: Current buffer object used as query result buffer
 * @bo_relocations: List of buffer object relocations
 * @res_relocations: List of resource relocations
 * @buf_start: Pointer to start of memory where command validation takes
 * place
 * @res_cache: Cache of recently looked up resources
 * @last_query_ctx: Last context that submitted a query
 * @needs_post_query_barrier: Whether a query barrier is needed after
 * command submission
 * @staged_bindings: Cached per-context binding tracker
 * @staged_bindings_inuse: Whether the cached per-context binding tracker
 * is in use
 * @staged_cmd_res: List of staged command buffer managed resources in this
 * command buffer
 * @ctx_list: List of context resources referenced in this command buffer
 * @dx_ctx_node: Validation metadata of the current DX context
 * @dx_query_mob: The MOB used for DX queries
 * @dx_query_ctx: The DX context used for the last DX query
 * @man: Pointer to the command buffer managed resource manager
 * @ctx: The validation context
 */
struct vmw_sw_context {
	DECLARE_HASHTABLE(res_ht, VMW_RES_HT_ORDER);
	bool kernel;
	struct vmw_fpriv *fp;
	struct drm_file *filp;
	uint32_t *cmd_bounce;
	uint32_t cmd_bounce_size;
	struct vmw_bo *cur_query_bo;
	struct list_head bo_relocations;
	struct list_head res_relocations;
	uint32_t *buf_start;
	struct vmw_res_cache_entry res_cache[vmw_res_max];
	struct vmw_resource *last_query_ctx;
	bool needs_post_query_barrier;
	struct vmw_ctx_binding_state *staged_bindings;
	bool staged_bindings_inuse;
	struct list_head staged_cmd_res;
	struct list_head ctx_list;
	struct vmw_ctx_validation_info *dx_ctx_node;
	struct vmw_bo *dx_query_mob;
	struct vmw_resource *dx_query_ctx;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_validation_context *ctx;
};
struct vmw_legacy_display; struct vmw_overlay;
/*
 * struct vmw_otable - Guest Memory OBject table metadata
 *
 * @size: Size of the table (page-aligned).
 * @page_table: Pointer to a struct vmw_mob holding the page table.
 * @enabled: Whether this otable is currently set up on the device.
 *	NOTE(review): undocumented in the original header — confirm.
 */
struct vmw_otable {
	unsigned long size;
	struct vmw_mob *page_table;
	bool enabled;
};
/**
 * enum vmw_sm_type - Graphics context capability supported by device.
 * @VMW_SM_LEGACY: Pre DX context.
 * @VMW_SM_4: Context support up to SM4.
 * @VMW_SM_4_1: Context support up to SM4_1.
 * @VMW_SM_5: Context support up to SM5.
 * @VMW_SM_5_1X: Adds support for sm5_1 and gl43 extensions.
 * @VMW_SM_MAX: Should be the last.
 */
enum vmw_sm_type {
	VMW_SM_LEGACY = 0,
	VMW_SM_4,
	VMW_SM_4_1,
	VMW_SM_5,
	VMW_SM_5_1X,
	VMW_SM_MAX
};
/*
 * Surface swapping. The "surface_lru" list is protected by the
 * resource lock in order to be able to destroy a surface and take
 * it off the lru atomically. "used_memory_size" is currently
 * protected by the cmdbuf mutex for simplicity.
 */
/*
 * The locking here is fine-grained, so that it is performed once
 * for every read- and write operation. This is of course costly, but we
 * don't perform much register access in the timing critical paths anyway.
 * Instead we have the extra benefit of being sure that we don't forget
 * the hw lock around register accesses.
 */
static inline void vmw_write(struct vmw_private *dev_priv,
			     unsigned int offset, uint32_t value)
{
	if (vmw_is_svga_v3(dev_priv)) {
		/* SVGA v3 devices expose registers through MMIO. */
		iowrite32(value, dev_priv->rmmio + offset);
	} else {
		/* Legacy devices use an index/value I/O port pair. */
		spin_lock(&dev_priv->hw_lock);
		outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
		outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
		spin_unlock(&dev_priv->hw_lock);
	}
}
/** * vmw_resource_mob_attached - Whether a resource currently has a mob attached * @res: The resource * * Return: true if the resource has a mob attached, false otherwise.
*/ staticinlinebool vmw_resource_mob_attached(conststruct vmw_resource *res)
{ return !RB_EMPTY_NODE(&res->mob_node);
}
/** * vmw_piter_next - Advance the iterator one page. * * @viter: Pointer to the iterator to advance. * * Returns false if past the list of pages, true otherwise.
*/ staticinlinebool vmw_piter_next(struct vmw_piter *viter)
{ return viter->next(viter);
}
/** * vmw_piter_dma_addr - Return the DMA address of the current page. * * @viter: Pointer to the iterator * * Returns the DMA address of the page pointed to by @viter.
*/ staticinline dma_addr_t vmw_piter_dma_addr(struct vmw_piter *viter)
{ return viter->dma_address(viter);
}
/** * vmw_piter_page - Return a pointer to the current page. * * @viter: Pointer to the iterator * * Returns the DMA address of the page pointed to by @viter.
*/ staticinlinestruct page *vmw_piter_page(struct vmw_piter *viter)
{ return viter->pages[viter->i];
}
int vmw_overlay_init(struct vmw_private *dev_priv); int vmw_overlay_close(struct vmw_private *dev_priv); int vmw_overlay_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vmw_overlay_resume_all(struct vmw_private *dev_priv); int vmw_overlay_pause_all(struct vmw_private *dev_priv); int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out); int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id); int vmw_overlay_num_overlays(struct vmw_private *dev_priv); int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
/** * GMR Id manager
*/
int vmw_gmrid_man_init(struct vmw_private *dev_priv, int type); void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type);
/** * System memory manager
*/ int vmw_sys_man_init(struct vmw_private *dev_priv); void vmw_sys_man_fini(struct vmw_private *dev_priv);
/**
 * struct vmw_diff_cpy - CPU blit information structure
 *
 * @rect: The output bounding box rectangle.
 * @line: The current line of the blit.
 * @line_offset: Offset of the current line segment.
 * @cpp: Bytes per pixel (granularity information).
 * @do_cpy: Which copy function to use for the blit.
 */
struct vmw_diff_cpy {
	struct drm_rect rect;
	size_t line;
	size_t line_offset;
	int cpp;
	void (*do_cpy)(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
		       size_t n);
};
int vmw_mksstat_reset_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vmw_mksstat_add_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vmw_mksstat_remove_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vmw_mksstat_remove_all(struct vmw_private *dev_priv);
/* VMW logging */

/**
 * VMW_DEBUG_USER - Debug output for user-space debugging.
 *
 * @fmt: printf() like format string.
 *
 * This macro is for logging user-space error and debugging messages for e.g.
 * command buffer execution errors due to malformed commands, invalid context,
 * etc.
 */
#define VMW_DEBUG_USER(fmt, ...)                                              \
	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
/** * vmw_fifo_mem_read - Perform a MMIO read from the fifo memory * * @fifo_reg: The fifo register to read from * * This function is intended to be equivalent to ioread32() on * memremap'd memory, but without byteswapping.
*/ staticinline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
BUG_ON(vmw_is_svga_v3(vmw)); return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}
/**
 * vmw_fifo_mem_write - Perform a MMIO write to volatile memory
 *
 * @vmw: The device private
 * @fifo_reg: The fifo register to write to
 * @value: The value to write
 *
 * This function is intended to be equivalent to iowrite32 on
 * memremap'd memory, but without byteswapping.
 */
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				      u32 value)
{
	/* SVGA v3 devices have no fifo memory; this path must not be hit. */
	BUG_ON(vmw_is_svga_v3(vmw));
	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}