/* * Copyright 2005 Stephane Marchesin. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE.
*/
/*
 * NOUVEAU_GETPARAM_EXEC_PUSH_MAX - query max pushes through getparam
 *
 * Query the maximum amount of IBs that can be pushed through a single
 * &drm_nouveau_exec structure and hence a single &DRM_IOCTL_NOUVEAU_EXEC
 * ioctl().
 */
#define NOUVEAU_GETPARAM_EXEC_PUSH_MAX 17
/*
 * NOUVEAU_GETPARAM_VRAM_BAR_SIZE - query bar size
 *
 * Query the VRAM BAR size.
 */
#define NOUVEAU_GETPARAM_VRAM_BAR_SIZE 18
/*
 * NOUVEAU_GETPARAM_HAS_VMA_TILEMODE
 *
 * Query whether tile mode and PTE kind are accepted with VM allocs or not.
 */
#define NOUVEAU_GETPARAM_HAS_VMA_TILEMODE 20
/*
 * These are used to support selecting the main engine used on Kepler.
 * This goes into drm_nouveau_channel_alloc::tt_ctxdma_handle.
 */
#define NOUVEAU_FIFO_ENGINE_GR  0x01
#define NOUVEAU_FIFO_ENGINE_VP  0x02
#define NOUVEAU_FIFO_ENGINE_PPP 0x04
#define NOUVEAU_FIFO_ENGINE_BSP 0x08
#define NOUVEAU_FIFO_ENGINE_CE  0x30
/**
 * struct drm_nouveau_sync - sync object
 *
 * This structure serves as synchronization mechanism for (potentially)
 * asynchronous operations such as EXEC or VM_BIND.
 */
struct drm_nouveau_sync {
	/**
	 * @flags: the flags for a sync object
	 *
	 * The first 8 bits are used to determine the type of the sync object.
	 */
	__u32 flags;
#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
	/**
	 * @handle: the handle of the sync object
	 */
	__u32 handle;
	/**
	 * @timeline_value:
	 *
	 * The timeline point of the sync object in case the syncobj is of
	 * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
	 */
	__u64 timeline_value;
};
/**
 * struct drm_nouveau_vm_init - GPU VA space init structure
 *
 * Used to initialize the GPU's VA space for a user client, telling the kernel
 * which portion of the VA space is managed by the UMD and kernel respectively.
 *
 * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
 * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
 * with -ENOSYS.
 */
struct drm_nouveau_vm_init {
	/**
	 * @kernel_managed_addr: start address of the kernel managed VA space
	 * region
	 */
	__u64 kernel_managed_addr;
	/**
	 * @kernel_managed_size: size of the kernel managed VA space region in
	 * bytes
	 */
	__u64 kernel_managed_size;
};
/**
 * struct drm_nouveau_vm_bind_op - VM_BIND operation
 *
 * This structure represents a single VM_BIND operation. UMDs should pass
 * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
 */
struct drm_nouveau_vm_bind_op {
	/**
	 * @op: the operation type
	 *
	 * Supported values:
	 *
	 * %DRM_NOUVEAU_VM_BIND_OP_MAP - Map a GEM object to the GPU's VA
	 * space. Optionally, the &DRM_NOUVEAU_VM_BIND_SPARSE flag can be
	 * passed to instruct the kernel to create sparse mappings for the
	 * given range.
	 *
	 * %DRM_NOUVEAU_VM_BIND_OP_UNMAP - Unmap an existing mapping in the
	 * GPU's VA space. If the region the mapping is located in is a
	 * sparse region, new sparse mappings are created where the unmapped
	 * (memory backed) mapping was mapped previously. To remove a sparse
	 * region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
	 */
	__u32 op;
#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
	/**
	 * @flags: the flags for a &drm_nouveau_vm_bind_op
	 *
	 * Supported values:
	 *
	 * %DRM_NOUVEAU_VM_BIND_SPARSE - Indicates that an allocated VA
	 * space region should be sparse.
	 */
	__u32 flags;
#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
	/**
	 * @handle: the handle of the DRM GEM object to map
	 */
	__u32 handle;
	/**
	 * @pad: 32 bit padding, should be 0
	 */
	__u32 pad;
	/**
	 * @addr:
	 *
	 * the address the VA space region or (memory backed) mapping should be
	 * mapped to
	 */
	__u64 addr;
	/**
	 * @bo_offset: the offset within the BO backing the mapping
	 */
	__u64 bo_offset;
	/**
	 * @range: the size of the requested mapping in bytes
	 */
	__u64 range;
};
/**
 * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
 */
struct drm_nouveau_vm_bind {
	/**
	 * @op_count: the number of &drm_nouveau_vm_bind_op
	 */
	__u32 op_count;
	/**
	 * @flags: the flags for a &drm_nouveau_vm_bind ioctl
	 *
	 * Supported values:
	 *
	 * %DRM_NOUVEAU_VM_BIND_RUN_ASYNC - Indicates that the given VM_BIND
	 * operation should be executed asynchronously by the kernel.
	 *
	 * If this flag is not supplied the kernel executes the associated
	 * operations synchronously and doesn't accept any &drm_nouveau_sync
	 * objects.
	 */
	__u32 flags;
#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
	/**
	 * @wait_count: the number of wait &drm_nouveau_syncs
	 */
	__u32 wait_count;
	/**
	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
	 */
	__u32 sig_count;
	/**
	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
	 */
	__u64 wait_ptr;
	/**
	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
	 */
	__u64 sig_ptr;
	/**
	 * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
	 */
	__u64 op_ptr;
};
/**
 * struct drm_nouveau_exec_push - EXEC push operation
 *
 * This structure represents a single EXEC push operation. UMDs should pass an
 * array of this structure via struct drm_nouveau_exec's &push_ptr field.
 */
struct drm_nouveau_exec_push {
	/**
	 * @va: the virtual address of the push buffer mapping
	 */
	__u64 va;
	/**
	 * @va_len: the length of the push buffer mapping
	 */
	__u32 va_len;
	/**
	 * @flags: the flags for this push buffer mapping
	 */
	__u32 flags;
#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
};
/**
 * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
 */
struct drm_nouveau_exec {
	/**
	 * @channel: the channel to execute the push buffer in
	 */
	__u32 channel;
	/**
	 * @push_count: the number of &drm_nouveau_exec_push ops
	 */
	__u32 push_count;
	/**
	 * @wait_count: the number of wait &drm_nouveau_syncs
	 */
	__u32 wait_count;
	/**
	 * @sig_count: the number of &drm_nouveau_syncs to signal when finished
	 */
	__u32 sig_count;
	/**
	 * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
	 */
	__u64 wait_ptr;
	/**
	 * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
	 */
	__u64 sig_ptr;
	/**
	 * @push_ptr: pointer to &drm_nouveau_exec_push ops
	 */
	__u64 push_ptr;
};
/*
 * Below is used to validate ioctl arguments; userspace can also use it to
 * make sure that no bits are set beyond known fields for a given kernel
 * version.
 */
#define NOUVEAU_SVM_BIND_VALID_BITS 48
#define NOUVEAU_SVM_BIND_VALID_MASK ((1ULL << NOUVEAU_SVM_BIND_VALID_BITS) - 1)
/*
 * NOUVEAU_SVM_BIND_COMMAND__MIGRATE: synchronous migrate to target memory.
 * result: number of pages successfully migrated to the target memory.
 */
#define NOUVEAU_SVM_BIND_COMMAND__MIGRATE 0
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.