#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with .1 second between?  Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
/*
 * Production build: report a broken ring but keep running.
 * Note: the macro argument is parenthesized as (_vq) here, matching the
 * DEBUG variant above (the original used bare _vq inside dev_err, a
 * macro-hygiene hazard if the caller passes a non-trivial expression).
 */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif
/* Per-buffer bookkeeping the driver keeps for each head of a split ring. */
struct vring_desc_state_split {
	void *data;			/* Data for callback. */

	/* Indirect desc table and extra table, if any. These two will be
	 * allocated together. So we won't stress more to the memory allocator.
	 */
	struct vring_desc *indir_desc;
};
/* Per-buffer bookkeeping the driver keeps for each head of a packed ring. */
struct vring_desc_state_packed {
	void *data;			/* Data for callback. */

	/* Indirect desc table and extra table, if any. These two will be
	 * allocated together. So we won't stress more to the memory allocator.
	 */
	struct vring_packed_desc *indir_desc;
	u16 num;			/* Descriptor list length. */
	u16 last;			/* The last desc state in a list. */
};
/*
 * Driver-private shadow of a descriptor: the DMA mapping and link
 * information needed to unmap/free it later without re-reading the
 * device-visible ring.
 */
struct vring_desc_extra {
	dma_addr_t addr;		/* Descriptor DMA addr. */
	u32 len;			/* Descriptor length. */
	u16 flags;			/* Descriptor flags. */
	u16 next;			/* The next desc state in a list. */
};
struct vring_virtqueue_split {
	/* Actual memory layout for this queue. */
	struct vring vring;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/*
	 * Last written value to avail->idx in
	 * guest byte order.
	 */
	u16 avail_idx_shadow;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index  we've seen.
	 * for split ring, it just contains last used index
	 * for packed ring:
	 * bits up to VRING_PACKED_EVENT_F_WRAP_CTR include the last used index.
	 * bits from VRING_PACKED_EVENT_F_WRAP_CTR include the used wrap counter.
	 */
	u16 last_used_idx;

	/* Hint for event idx: already triggered no need to disable. */
	bool event_triggered;

	/* NOTE(review): the members from here down (the split/packed union,
	 * notify, we_own_ring, dma_dev and the DEBUG fields) appear to belong
	 * to the top-level vring_virtqueue structure, not to
	 * vring_virtqueue_split — the union even contains a
	 * vring_virtqueue_split member. This extract looks spliced; confirm
	 * against the original file before relying on this layout.
	 */
	union {
		/* Available for split ring */
		struct vring_virtqueue_split split;

		/* Available for packed ring */
		struct vring_virtqueue_packed packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

	/* Device used for doing DMA */
	struct device *dma_dev;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};
staticbool virtqueue_use_indirect(conststruct vring_virtqueue *vq, unsignedint total_sg)
{ /* * If the host supports indirect descriptor tables, and we have multiple * buffers, then go indirect. FIXME: tune this threshold
*/ return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}
/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */
static bool vring_use_dma_api(const struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supposed
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	/* NOTE(review): the code below (queue / dma_handle /
	 * free_pages_exact) looks like the tail of an allocation helper
	 * (vring_alloc_queue-style), not part of vring_use_dma_api(); the
	 * final "return false" of this function appears to be missing. The
	 * extract seems spliced — confirm against the original file. */
	if (queue) {
		phys_addr_t phys_addr = virt_to_phys(queue);
		*dma_handle = (dma_addr_t)phys_addr;

		/*
		 * Sanity check: make sure we didn't truncate
		 * the address.  The only arches I can find that
		 * have 64-bit phys_addr_t but 32-bit dma_addr_t
		 * are certain non-highmem MIPS and x86
		 * configurations, but these configurations
		 * should never allocate physical pages above 32
		 * bits, so this is fine.  Just in case, throw a
		 * warning and abort if we end up with an
		 * unrepresentable address.
		 */
		if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
			free_pages_exact(queue, PAGE_ALIGN(size));
			return NULL;
		}
	}
	return queue;
}
}
/* * The DMA ops on various arches are rather gnarly right now, and * making all of the arch DMA ops work on the vring device itself * is a mess.
*/ staticstruct device *vring_dma_dev(conststruct vring_virtqueue *vq)
{ return vq->dma_dev;
}
	/* NOTE(review): fragment of a scatterlist-mapping helper
	 * (vring_map_one_sg-style) — the function head and local
	 * declarations are missing from this extract. */
	if (!vq->use_dma_api) {
		/*
		 * If DMA is not used, KMSAN doesn't know that the scatterlist
		 * is initialized by the hardware. Explicitly check/unpoison it
		 * depending on the direction.
		 */
		kmsan_handle_dma(sg_page(sg), sg->offset, sg->length, direction);
		*addr = (dma_addr_t)sg_phys(sg);
		return 0;
	}

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	*addr = dma_map_page(vring_dma_dev(vq),
			     sg_page(sg), sg->offset, sg->length,
			     direction);

	if (dma_mapping_error(vring_dma_dev(vq), *addr))
		return -ENOMEM;

	/* NOTE(review): splice — the statement below (clearing __GFP_HIGHMEM)
	 * belongs to an indirect-table allocator (alloc_indirect_split-style);
	 * the tail of the mapping helper is missing. */
	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;
	/* NOTE(review): fragment of virtqueue_add_split() — the function head
	 * and local declarations are missing from this extract. */
	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		extra = (struct vring_desc_extra *)&desc[total_sg];
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		extra = vq->split.desc_extra;
		i = head;
		descs_used = total_sg;
	}

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}
	/* NOTE(review): continuation of the virtqueue_add_split() fragment:
	 * map the readable then the writable scatterlists into descriptors. */
	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr;
			u32 len;

			if (vring_map_one_sg(vq, sg, DMA_TO_DEVICE, &addr, &len, premapped))
				goto unmap_release;

			prev = i;
			/* Note that we trust indirect descriptor
			 * table since it use stream DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
						     VRING_DESC_F_NEXT,
						     premapped);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr;
			u32 len;

			if (vring_map_one_sg(vq, sg, DMA_FROM_DEVICE, &addr, &len, premapped))
				goto unmap_release;

			prev = i;
			/* Note that we trust indirect descriptor
			 * table since it use stream DMA mapping.
			 */
			i = virtqueue_add_desc_split(_vq, desc, extra, i, addr, len,
						     VRING_DESC_F_NEXT |
						     VRING_DESC_F_WRITE,
						     premapped);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
	if (!indirect && vring_need_unmap_buffer(vq, &extra[prev]))
		vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
			~VRING_DESC_F_NEXT;

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

	/* NOTE(review): splice — the code that writes the indirect table's
	 * address into the head descriptor and closes this if-block appears
	 * to be missing; braces below do not balance in this extract. */
		/* Store token and indirect buffer state. */
		vq->split.desc_state[head].data = data;
		if (indirect)
			vq->split.desc_state[head].indir_desc = desc;
		else
			vq->split.desc_state[head].indir_desc = ctx;

		/* Put entry in available array (but don't update avail->idx until they
		 * do sync). */
		avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
		vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

		/* Descriptors and available array need to be set before we expose the
		 * new available array entries. */
		virtio_wmb(vq->weak_barriers);
		vq->split.avail_idx_shadow++;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
		vq->num_added++;

		pr_debug("Added buffer head %i to %p\n", head, vq);
		END_USE(vq);

		/* This is very unlikely, but theoretically possible.  Kick
		 * just in case. */
		if (unlikely(vq->num_added == (1 << 16) - 1))
			virtqueue_kick(_vq);

		return 0;
unmap_release:
	/* Error path of virtqueue_add_split(): walk back over the
	 * descriptors mapped so far and release them. */
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
	/* NOTE(review): splice — the unmap-loop body is missing; the code
	 * below (BAD_RING sanity checks, detach_buf_split, used-event
	 * update) looks like virtqueue_get_buf_ctx_split(). */
		if (unlikely(i >= vq->split.vring.num)) {
			BAD_RING(vq, "id %u out of range\n", i);
			return NULL;
		}
		if (unlikely(!vq->split.desc_state[i].data)) {
			BAD_RING(vq, "id %u is not a head!\n", i);
			return NULL;
		}

		/* detach_buf_split clears data, so grab it now. */
		ret = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, ctx);
		vq->last_used_idx++;
		/* If we expect an interrupt for the next entry, tell host
		 * by writing event index and flush out the write before
		 * the read in the next get_buf call. */
		if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
			virtio_store_mb(vq->weak_barriers,
					&vring_used_event(&vq->split.vring),
					cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	/* NOTE(review): splice — the block below looks like
	 * virtqueue_disable_cb_split(). */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;

		/*
		 * If device triggered an event already it won't trigger one again:
		 * no need to disable.
		 */
		if (vq->event_triggered)
			return;

		if (vq->event)
			/* TODO: this is a hack. Figure out a cleaner value to write. */
			vring_used_event(&vq->split.vring) = 0x0;
		else
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

	/* NOTE(review): splice — fragment of
	 * virtqueue_enable_cb_prepare_split(). */
	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
	/* NOTE(review): fragment of virtqueue_enable_cb_delayed_split(); the
	 * function head is missing from this extract. */
	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	/* NOTE(review): splice — the loop below looks like
	 * virtqueue_detach_unused_buf_split(). */
	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	/* NOTE(review): splice — the block below (vring_split-based) looks
	 * like part of the split-ring attach/init path. */
	/* No callback?  Tell other side not to bother us. */
	if (!vq->vq.callback) {
		vring_split->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vring_split->vring.avail->flags = cpu_to_virtio16(vdev,
					vring_split->avail_flags_shadow);
	}
}
/* NOTE(review): this function head does not match its body — a void
 * function cannot return -EINVAL/-ENOMEM, and the body (power-of-2 check,
 * vring_alloc_queue loop) looks like vring_alloc_queue_split(). The
 * extract appears spliced; confirm against the original file. */
static void virtqueue_reinit_split(struct vring_virtqueue *vq)
{
	int num;

	/* We assume num is a power of 2. */
	if (!is_power_of_2(num)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return -EINVAL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
					  dma_dev);
		if (queue)
			break;
		if (!may_reduce_num)
			return -ENOMEM;
	}

	if (!num)
		return -ENOMEM;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL | __GFP_ZERO,
					  dma_dev);
	}
	if (!queue)
		return -ENOMEM;
/* Allocate an indirect descriptor table for a packed ring.
 * NOTE(review): truncated in this extract — the allocation itself and the
 * return are missing after the gfp adjustment below. */
static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_desc_extra *extra;
	struct vring_packed_desc *desc;
	int i, size;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;
	/* NOTE(review): fragment of virtqueue_add_indirect_packed() — the
	 * function head and descriptor-fill loop are missing. */
	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
				total_sg * sizeof(struct vring_packed_desc),
				DMA_TO_DEVICE);
	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
						vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	n = head + 1;
	if (n >= vq->packed.vring.num) {
		n = 0;
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_extra[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	vq->num_added += 1;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;

	for (i = 0; i < err_idx; i++)
		vring_unmap_extra_packed(vq, &extra[i]);

	/* NOTE(review): splice — the code below looks like the tail of
	 * virtqueue_add_packed() (non-indirect path), including a second
	 * unmap_release label that would not compile in one function. */
	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = head_flags;
	vq->num_added += descs_used;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;
	curr = vq->free_head;

	vq->packed.avail_used_flags = avail_used_flags;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_extra_packed(vq, &vq->packed.desc_extra[curr]);
		curr = vq->packed.desc_extra[curr].next;
		i++;
		if (i >= vq->packed.vring.num)
			i = 0;
	}
	/* NOTE(review): fragment of virtqueue_get_buf_ctx_packed() — the
	 * function head is missing from this extract. */
	if (unlikely(id >= vq->packed.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", id);
		return NULL;
	}
	if (unlikely(!vq->packed.desc_state[id].data)) {
		BAD_RING(vq, "id %u is not a head!\n", id);
		return NULL;
	}

	/* detach_buf_packed clears data, so grab it now. */
	ret = vq->packed.desc_state[id].data;
	detach_buf_packed(vq, id, ctx);

	/*
	 * If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call.
	 */
	if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
		virtio_store_mb(vq->weak_barriers,
				&vq->packed.vring.driver->off_wrap,
				cpu_to_le16(vq->last_used_idx));

	/* NOTE(review): splice — the pieces below look like fragments of the
	 * packed-ring enable-callback, detach-unused-buf and setup paths. */
	/*
	 * We optimistically turn back on interrupts, then check if there was
	 * more to do.
	 */

	if (vq->event) {
		vq->packed.vring.driver->off_wrap =
			cpu_to_le16(vq->last_used_idx);
		/*
		 * We need to update event offset and event wrap
		 * counter first before updating event flags.
		 */
		virtio_wmb(vq->weak_barriers);
	}

	for (i = 0; i < vq->packed.vring.num; i++) {
		if (!vq->packed.desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->packed.desc_state[i].data;
		detach_buf_packed(vq, i, NULL);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->packed.vring.num);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vring_packed->event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
		vring_packed->vring.driver->flags =
			cpu_to_le16(vring_packed->event_flags_shadow);
	}
}
/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 *
 * NB: ENOSPC is a special code that is only returned on an attempt to add a
 * buffer to a full VQ. It indicates that some buffers are outstanding and that
 * the operation can be retried after some buffers have been used.
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
	/* NOTE(review): function body truncated in this extract. */
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	/* One driver-readable sg list, nothing device-writable, not premapped. */
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
/**
 * virtqueue_add_outbuf_premapped - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Return:
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf_premapped(struct virtqueue *vq,
				   struct scatterlist *sg, unsigned int num,
				   void *data,
				   gfp_t gfp)
{
	/* Same as virtqueue_add_outbuf() but the sg is already DMA-mapped. */
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, true, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf_premapped);
/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	/* Nothing driver-readable, one device-writable sg list, not premapped. */
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	/* Like virtqueue_add_inbuf() but carries an extra per-buffer context. */
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, false, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);
/**
 * virtqueue_add_inbuf_premapped - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Return:
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_premapped(struct virtqueue *vq,
				  struct scatterlist *sg, unsigned int num,
				  void *data,
				  void *ctx,
				  gfp_t gfp)
{
	/* Same as virtqueue_add_inbuf_ctx() but the sg is already DMA-mapped. */
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, true, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_premapped);
/**
 * virtqueue_dma_dev - get the dma dev
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns the dma dev. That can been used for dma api.
 * Returns NULL when the queue does not go through the DMA API, in which
 * case the caller must not attempt DMA operations on it.
 */
struct device *virtqueue_dma_dev(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->use_dma_api)
		return vring_dma_dev(vq);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_dma_dev);
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	/* NOTE(review): function body truncated in this extract. */
/** * virtqueue_notify - second half of split virtqueue_kick call. * @_vq: the struct virtqueue * * This does not need to be serialized. * * Returns false if host notify failed or queue is broken, otherwise true.
*/ bool virtqueue_notify(struct virtqueue *_vq)
{ struct vring_virtqueue *vq = to_vvq(_vq);
if (unlikely(vq->broken)) returnfalse;
/* Prod other side to tell it about changes. */ if (!vq->notify(_vq)) {
vq->broken = true; returnfalse;
} returntrue;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);
/** * virtqueue_kick - update after add_buf * @vq: the struct virtqueue * * After one or more virtqueue_add_* calls, invoke this to kick * the other side. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). * * Returns false if kick failed, otherwise true.
*/ bool virtqueue_kick(struct virtqueue *vq)
{ if (virtqueue_kick_prepare(vq)) return virtqueue_notify(vq); returntrue;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
/** * virtqueue_get_buf_ctx - get the next used buffer * @_vq: the struct virtqueue we're talking about. * @len: the length written into the buffer * @ctx: extra context for the token * * If the device wrote data into the buffer, @len will be set to the * amount written. This means you don't need to clear the buffer * beforehand to ensure there's no data leakage in the case of short * writes. * * Caller must ensure we don't call this with other virtqueue * operations at the same time (except where noted). *
 * (kernel-doc comment truncated in this extract)
 */