switch (tail->status) { case VIRTIO_IOMMU_S_OK: return 0; case VIRTIO_IOMMU_S_UNSUPP: return -ENOSYS; case VIRTIO_IOMMU_S_INVAL: return -EINVAL; case VIRTIO_IOMMU_S_RANGE: return -ERANGE; case VIRTIO_IOMMU_S_NOENT: return -ENOENT; case VIRTIO_IOMMU_S_FAULT: return -EFAULT; case VIRTIO_IOMMU_S_NOMEM: return -ENOMEM; case VIRTIO_IOMMU_S_IOERR: case VIRTIO_IOMMU_S_DEVERR: default: return -EIO;
}
}
staticvoid viommu_set_req_status(void *buf, size_t len, int status)
{ struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);
if (req->type == VIRTIO_IOMMU_T_PROBE) return len - viommu->probe_size - tail_size;
return len - tail_size;
}
/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 *
 * Caller must hold viommu->request_lock.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		/*
		 * The device wrote nothing back: mark the request as failed
		 * so the caller doesn't read a stale status from the tail.
		 */
		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		/* Copy the device-written part back to the caller's buffer */
		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}
/* Take the request lock, then complete all in-flight requests */
static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}
/*
 * __viommu_add_req - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	/* Keep a private copy so the caller may reuse its stack buffer */
	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);

	/* Device-readable head, then device-writable tail */
	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

/* Take the request lock, then add a request without waiting for completion */
static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}
/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}
for (i = 0; i < fwspec->num_ids; i++) {
req->endpoint = cpu_to_le32(fwspec->ids[i]);
ret = viommu_send_req_sync(viommu, req, sizeof(*req)); if (ret) return ret;
} return 0;
}
/* * viommu_add_mapping - add a mapping to the internal tree * * On success, return the new mapping. Otherwise return NULL.
*/ staticint viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
phys_addr_t paddr, u32 flags)
{ unsignedlong irqflags; struct viommu_mapping *mapping;
mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC); if (!mapping) return -ENOMEM;
/* * viommu_del_mappings - remove mappings from the internal tree * * @vdomain: the domain * @iova: start of the range * @end: end of the range * * On success, returns the number of unmapped bytes
*/ static size_t viommu_del_mappings(struct viommu_domain *vdomain,
u64 iova, u64 end)
{
size_t unmapped = 0; unsignedlong flags; struct viommu_mapping *mapping = NULL; struct interval_tree_node *node, *next;
spin_lock_irqsave(&vdomain->mappings_lock, flags);
next = interval_tree_iter_first(&vdomain->mappings, iova, end); while (next) {
node = next;
mapping = container_of(node, struct viommu_mapping, iova);
next = interval_tree_iter_next(node, iova, end);
/* Trying to split a mapping? */ if (mapping->iova.start < iova) break;
/* * Virtio-iommu doesn't allow UNMAP to split a mapping created * with a single MAP request, so remove the full mapping.
*/
unmapped += mapping->iova.last - mapping->iova.start + 1;
/* * viommu_replay_mappings - re-send MAP requests * * When reattaching a domain that was previously detached from all endpoints, * mappings were deleted from the device. Re-create the mappings available in * the internal tree.
*/ staticint viommu_replay_mappings(struct viommu_domain *vdomain)
{ int ret = 0; unsignedlong flags; struct viommu_mapping *mapping; struct interval_tree_node *node; struct virtio_iommu_req_map map;
probe->head.type = VIRTIO_IOMMU_T_PROBE; /* * For now, assume that properties of an endpoint that outputs multiple * IDs are consistent. Only probe the first one.
*/
probe->endpoint = cpu_to_le32(fwspec->ids[0]);
ret = viommu_send_req_sync(viommu, probe, probe_len); if (ret) goto out_free;
prop = (void *)probe->properties;
type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
cur < viommu->probe_size) {
len = le16_to_cpu(prop->length) + sizeof(*prop);
switch (type) { case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
ret = viommu_add_resv_mem(vdev, (void *)prop, len); break; default:
dev_err(dev, "unknown viommu prop 0x%x\n", type);
}
if (ret)
dev_err(dev, "failed to parse viommu prop 0x%x\n", type);
if (vdomain->viommu != vdev->viommu) return -EINVAL;
/* * In the virtio-iommu device, when attaching the endpoint to a new * domain, it is detached from the old one and, if as a result the * old domain isn't attached to any endpoint, all mappings are removed * from the old domain and it is freed. * * In the driver the old domain still exists, and its mappings will be * recreated if it gets reattached to an endpoint. Otherwise it will be * freed explicitly. * * vdev->vdomain is protected by group->mutex
*/ if (vdev->vdomain)
vdev->vdomain->nr_endpoints--;
ret = viommu_send_attach_req(vdomain->viommu, dev, &req); if (ret) return ret;
if (!vdomain->nr_endpoints) { /* * This endpoint is the first to be attached to the domain. * Replay existing mappings (e.g. SW MSI).
*/
ret = viommu_replay_mappings(vdomain); if (ret) return ret;
}
/* * May be called before the viommu is initialized including * while creating direct mapping
*/ if (!vdomain->nr_endpoints) return 0; return viommu_sync_req(vdomain->viommu);
}
/* * May be called before the viommu is initialized including * while creating direct mapping
*/ if (!vdomain->nr_endpoints) return;
viommu_sync_req(vdomain->viommu);
}
/* * If the device didn't register any bypass MSI window, add a * software-mapped region.
*/ if (!msi) {
msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
prot, IOMMU_RESV_SW_MSI,
GFP_KERNEL); if (!msi) return;
for (i = 0; i < nr_evts; i++) {
sg_init_one(sg, &evts[i], sizeof(*evts));
ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL); if (ret) return ret;
}
if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;
/* Reserve an ID to use as the bypass domain */ if (virtio_has_feature(viommu->vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
viommu->identity_domain_id = viommu->first_domain;
viommu->first_domain++;
}
virtio_device_ready(vdev);
/* Populate the event queue with buffers */
ret = viommu_fill_evtq(viommu); if (ret) goto err_free_vqs;
ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
virtio_bus_name(vdev)); if (ret) goto err_free_vqs;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder die Vollständigkeit noch die Richtigkeit
noch die Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.