/* Base struct for all objects with a userspace ID handle. */
struct iommufd_object {
	/*
	 * Destroy will sleep and wait for wait_cnt to go to zero. This allows
	 * concurrent users of the ID to reliably avoid causing a spurious
	 * destroy failure. Incrementing this count should either be short
	 * lived or be revoked and blocked during pre_destroy().
	 */
	refcount_t wait_cnt;
	/* Userspace references on this object; object lives while users > 0 */
	refcount_t users;
	/* Discriminator for the concrete object kind behind this base */
	enum iommufd_object_type type;
	/* Userspace-visible ID handle for this object */
	unsigned int id;	/* was "unsignedint": fused keywords, invalid C */
};
/* Flag bits selecting read/write behavior for iommufd access operations */
enum {
	IOMMUFD_ACCESS_RW_READ = 0,
	IOMMUFD_ACCESS_RW_WRITE = 0x1,
	/* Set if the caller is in a kthread then rw will use kthread_use_mm() */
	IOMMUFD_ACCESS_RW_KTHREAD = 0x2,
	/* Only for use by selftest */
	__IOMMUFD_ACCESS_RW_SLOW_PATH = 0x4,
};
u64 base_addr; /* in guest physical address space */
/* Byte length of the guest queue memory starting at @base_addr */
size_t length;
/* NOTE(review): HW queue type selector; presumably the uapi enum — confirm */
enum iommu_hw_queue_type type;
/* Clean up all driver-specific parts of an iommufd_hw_queue */
void (*destroy)(struct iommufd_hw_queue *hw_queue);
};
/**
 * struct iommufd_viommu_ops - vIOMMU specific operations
 * @destroy: Clean up all driver-specific parts of an iommufd_viommu. The memory
 *           of the vIOMMU will be free-ed by iommufd core after calling this op
 * @alloc_domain_nested: Allocate a IOMMU_DOMAIN_NESTED on a vIOMMU that holds a
 *                       nesting parent domain (IOMMU_DOMAIN_PAGING). @user_data
 *                       must be defined in include/uapi/linux/iommufd.h.
 *                       It must fully initialize the new iommu_domain before
 *                       returning. Upon failure, ERR_PTR must be returned.
 * @cache_invalidate: Flush hardware cache used by a vIOMMU. It can be used for
 *                    any IOMMU hardware specific cache: TLB and device cache.
 *                    The @array passes in the cache invalidation requests, in
 *                    form of a driver data structure. A driver must update the
 *                    array->entry_num to report the number of handled requests.
 *                    The data structure of the array entry must be defined in
 *                    include/uapi/linux/iommufd.h
 * @vdevice_size: Size of the driver-defined vDEVICE structure per this vIOMMU
 * @vdevice_init: Initialize the driver-level structure of a vDEVICE object, or
 *                related HW procedure. @vdev is already initialized by iommufd
 *                core: vdev->dev and vdev->viommu pointers; vdev->id carries a
 *                per-vIOMMU virtual ID (refer to struct iommu_vdevice_alloc in
 *                include/uapi/linux/iommufd.h)
 *                If driver has a deinit function to revert what vdevice_init op
 *                does, it should set it to the @vdev->destroy function pointer
 * @get_hw_queue_size: Get the size of a driver-defined HW queue structure for a
 *                     given @viommu corresponding to @queue_type. Driver should
 *                     return 0 if HW queue aren't supported accordingly. It is
 *                     required for driver to use the HW_QUEUE_STRUCT_SIZE macro
 *                     to sanitize the driver-level HW queue structure related
 *                     to the core one
 * @hw_queue_init_phys: Initialize the driver-level structure of a HW queue that
 *                      is initialized with its core-level structure that holds
 *                      all the info about a guest queue memory.
 *                      Driver providing this op indicates that HW accesses the
 *                      guest queue memory via physical addresses.
 *                      @index carries the logical HW QUEUE ID per vIOMMU in a
 *                      guest VM, for a multi-queue model. @base_addr_pa carries
 *                      the physical location of the guest queue
 *                      If driver has a deinit function to revert what this op
 *                      does, it should set it to the @hw_queue->destroy pointer
 */
struct iommufd_viommu_ops {
	void (*destroy)(struct iommufd_viommu *viommu);
	struct iommu_domain *(*alloc_domain_nested)(
		struct iommufd_viommu *viommu, u32 flags,
		/* was "conststruct": fused keywords, invalid C */
		const struct iommu_user_data *user_data);
	int (*cache_invalidate)(struct iommufd_viommu *viommu,
				struct iommu_user_data_array *array);
	const size_t vdevice_size;
	int (*vdevice_init)(struct iommufd_vdevice *vdev);
	size_t (*get_hw_queue_size)(struct iommufd_viommu *viommu,
				    enum iommu_hw_queue_type queue_type);
	/* AMD's HW will add hw_queue_init simply using @hw_queue->base_addr */
	int (*hw_queue_init_phys)(struct iommufd_hw_queue *hw_queue, u32 index,
				  phys_addr_t base_addr_pa);
};
/*
 * Helpers for IOMMU driver to build/destroy a dependency between two sibling
 * structures created by one of the allocators above.
 *
 * @dependent and @depended are driver structures of the same type, each
 * embedding a struct iommufd_hw_queue at @member (both enforced at compile
 * time by the static_asserts below). The two queues must belong to the same
 * vIOMMU; otherwise this evaluates to -EINVAL (with a WARN_ON_ONCE splat).
 * On success, evaluates to the return value of _iommufd_object_depend().
 */
#define iommufd_hw_queue_depend(dependent, depended, member)                   \
	({                                                                     \
		int ret = -EINVAL;                                             \
                                                                               \
		static_assert(__same_type(struct iommufd_hw_queue,             \
					  dependent->member));                 \
		static_assert(__same_type(typeof(*dependent), *depended));     \
		if (!WARN_ON_ONCE(dependent->member.viommu !=                  \
				  depended->member.viommu))                    \
			ret = _iommufd_object_depend(&dependent->member.obj,   \
						     &depended->member.obj);   \
		ret;                                                           \
	})
/*
 * Helpers for IOMMU driver to alloc/destroy an mmappable area for a structure.
 *
 * To support an mmappable MMIO region, kernel driver must first register it to
 * iommufd core to allocate an @offset, during a driver-structure initialization
 * (e.g. viommu_init op). Then, it should report to user space this @offset and
 * the @length of the MMIO region for mmap syscall.
 */
/* was "staticinlineint" and "unsignedlong": fused keywords, invalid C */
static inline int iommufd_viommu_alloc_mmap(struct iommufd_viommu *viommu,
					    phys_addr_t mmio_addr,
					    size_t length,
					    unsigned long *offset)
{
	/* Registers the region with iommufd core; *offset is filled in */
	return _iommufd_alloc_mmap(viommu->ictx, &viommu->obj, mmio_addr,
				   length, offset);
}
/*
 * NOTE(review): the text below is a website disclaimer (German) that was
 * captured along with the code; it is not part of the header. Translated:
 * "The information on this web page has been carefully compiled to the best
 * of our knowledge. However, neither completeness, correctness, nor quality
 * of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */