/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;
	struct vhost_worker __rcu *worker;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_avail_idx;

	/* Next avail ring head when VIRTIO_F_IN_ORDER is negotiated */
	u16 next_avail_head;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used.
	 * Values are limited to 0x7fff, and the high bit is used as
	 * a wrap counter when using VIRTIO_F_RING_PACKED. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used holds a valid value */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};
/* Per-device vhost state: the owning mm, the set of virtqueues, and
 * the IOTLB / logging / worker machinery shared by all queues. */
struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	struct xarray worker_xa;
	bool use_worker;
	/*
	 * If fork_owner is true we use vhost_tasks to create
	 * the worker so all settings/limits like cgroups, NPROC,
	 * scheduler, etc are inherited from the owner. If false,
	 * we use kthreads and only attach to the same cgroups
	 * as the owner for compat with older kernels.
	 * The default value is set by fork_from_owner_default.
	 */
	bool fork_owner;
	int (*msg_handler)(struct vhost_dev *dev, u32 asid,
			   struct vhost_iotlb_msg *msg);
};
bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len); void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs, int iov_limit, int weight, int byte_weight, bool use_worker, int (*msg_handler)(struct vhost_dev *dev, u32 asid, struct vhost_iotlb_msg *msg)); long vhost_dev_set_owner(struct vhost_dev *dev); bool vhost_dev_has_owner(struct vhost_dev *dev); long vhost_dev_check_owner(struct vhost_dev *); struct vhost_iotlb *vhost_dev_reset_owner_prepare(void); void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb); void vhost_dev_cleanup(struct vhost_dev *); void vhost_dev_stop(struct vhost_dev *); long vhost_dev_ioctl(struct vhost_dev *, unsignedint ioctl, void __user *argp); long vhost_vring_ioctl(struct vhost_dev *d, unsignedint ioctl, void __user *argp); long vhost_worker_ioctl(struct vhost_dev *dev, unsignedint ioctl, void __user *argp); bool vhost_vq_access_ok(struct vhost_virtqueue *vq); bool vhost_log_access_ok(struct vhost_dev *); void vhost_clear_msg(struct vhost_dev *dev);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.