/* kick the remote processor, and let it know which virtqueue to poke at */ staticbool rproc_virtio_notify(struct virtqueue *vq)
{ struct rproc_vring *rvring = vq->priv; struct rproc *rproc = rvring->rvdev->rproc; int notifyid = rvring->notifyid;
/**
 * rproc_vq_interrupt() - tell remoteproc that a virtqueue is interrupted
 * @rproc: handle to the remote processor
 * @notifyid: index of the signalled virtqueue (unique per this @rproc)
 *
 * This function should be called by the platform-specific rproc driver,
 * when the remote processor signals that a specific virtqueue has pending
 * messages available.
 *
 * Return: IRQ_NONE if no message was found in the @notifyid virtqueue,
 * and otherwise returns IRQ_HANDLED.
 */
irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int notifyid)
{
	struct rproc_vring *rvring;

	dev_dbg(&rproc->dev, "vq index %d is interrupted\n", notifyid);

	rvring = idr_find(&rproc->notifyids, notifyid);
	if (!rvring || !rvring->vq)
		return IRQ_NONE;

	/*
	 * Hand the event to the virtio ring layer; it returns IRQ_HANDLED
	 * when messages were pending, satisfying the contract documented
	 * above. (The irq number is unused by vring_interrupt, hence 0.)
	 */
	return vring_interrupt(0, rvring->vq);
}
/*
 * NOTE(review): truncated fragment. The lines below are the middle of a
 * virtqueue-setup helper: they reference `id`, `name`, `rvdev`, `rproc`,
 * `vdev`, `ctx`, `callback` and `dev`, none of which are declared in this
 * view — the enclosing function signature and surrounding code are missing
 * from this chunk. Presumably this is the rproc "find/create vq" path;
 * confirm against the full file before relying on any of this.
 */
/* we're temporarily limited to two virtqueues per rvdev */ if (id >= ARRAY_SIZE(rvdev->vring)) return ERR_PTR(-EINVAL);
if (!name) return NULL;
/* Search allocated memory region by name */
mem = rproc_find_carveout_by_name(rproc, "vdev%dvring%d", rvdev->index,
id); if (!mem || !mem->va) return ERR_PTR(-ENOMEM);
/* Pick the per-id vring bookkeeping slot and its CPU-visible address. */
rvring = &rvdev->vring[id];
addr = mem->va;
num = rvring->num;
/* * Create the new vq, and tell virtio we're not interested in * the 'weak' smp barriers, since we're talking with a real device.
*/
vq = vring_new_virtqueue(id, num, rvring->align, vdev, false, ctx,
addr, rproc_virtio_notify, callback, name); if (!vq) {
dev_err(dev, "vring_new_virtqueue %s failed\n", name);
/* On failure, release the vring allocation before bailing out. */
rproc_free_vring(rvring); return ERR_PTR(-ENOMEM);
}
/*
 * NOTE(review): truncated fragment. Only the signature and two local
 * declarations of this virtio get_features callback are visible; the body
 * that actually reads the features from the firmware resource table entry
 * (`rsc`) is missing from this chunk.
 */
/* provide the vdev features as retrieved from the firmware */ static u64 rproc_virtio_get_features(struct virtio_device *vdev)
{ struct rproc_vdev *rvdev = vdev_to_rvdev(vdev); struct fw_rsc_vdev *rsc;
/*
 * NOTE(review): truncated fragment. Only the signature and two local
 * declarations of this device release callback are visible; the body that
 * frees the vdev and drops the rproc refcount (per the comment below) is
 * missing from this chunk. Also note the fused token "staticvoid" (missing
 * space) on the signature line — a paste/extraction defect to fix when the
 * full function is restored.
 */
/* * This function is called whenever vdev is released, and is responsible * to decrement the remote processor's refcount which was taken when vdev was * added. * * Never call this function directly; it will be called by the driver * core when needed.
*/ staticvoid rproc_virtio_dev_release(struct device *dev)
{ struct virtio_device *vdev = dev_to_virtio(dev); struct rproc_vdev *rvdev = vdev_to_rvdev(vdev);
/*
 * NOTE(review): garbled span. This starts as rproc_add_virtio_dev() but from
 * the "parse the vrings" loop onward it references `i`, `rsc` and
 * `rvdev_data`, none of which are declared above — lines from a different
 * function (one that walks the firmware resource table per-vdev) appear to
 * have been spliced in. The `goto out` statements target no visible `out:`
 * label, and no closing brace appears before the end of this span, so the
 * function body is truncated. Reconcile against the full file before editing.
 */
/** * rproc_add_virtio_dev() - register an rproc-induced virtio device * @rvdev: the remote vdev * @id: the device type identification (used to match it with a driver). * * This function registers a virtio device. This vdev's partent is * the rproc device. * * Return: 0 on success or an appropriate error value otherwise
*/ staticint rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id)
{ struct rproc *rproc = rvdev->rproc; struct device *dev = &rvdev->pdev->dev; struct virtio_device *vdev; struct rproc_mem_entry *mem; int ret;
/* A kick handler is mandatory: rproc_virtio_notify() calls it blindly. */
if (rproc->ops->kick == NULL) {
ret = -EINVAL;
dev_err(dev, ".kick method not defined for %s\n", rproc->name); goto out;
}
/* Try to find dedicated vdev buffer carveout */
mem = rproc_find_carveout_by_name(rproc, "vdev%dbuffer", rvdev->index); if (mem) {
phys_addr_t pa;
if (mem->of_resm_idx != -1) { struct device_node *np = rproc->dev.parent->of_node;
/* Associate reserved memory to vdev device */
ret = of_reserved_mem_device_init_by_idx(dev, np,
mem->of_resm_idx); if (ret) {
dev_err(dev, "Can't associate reserved memory\n"); goto out;
}
} else { if (mem->va) {
dev_warn(dev, "vdev %d buffer already mapped\n",
rvdev->index);
/* Carveout already mapped: derive its physical address from va. */
pa = rproc_va_to_pa(mem->va);
} else { /* Use dma address as carveout no memmapped yet */
pa = (phys_addr_t)mem->dma;
}
/* Associate vdev buffer memory pool to vdev subdev */
ret = dma_declare_coherent_memory(dev, pa,
mem->da,
mem->len); if (ret < 0) {
dev_err(dev, "Failed to associate buffer\n"); goto out;
}
}
} else { struct device_node *np = rproc->dev.parent->of_node;
/* * If we don't have dedicated buffer, just attempt to re-assign * the reserved memory from our parent. A default memory-region * at index 0 from the parent's memory-regions is assigned for * the rvdev dev to allocate from. Failure is non-critical and * the allocations will fall back to global pools, so don't * check return value either.
*/
of_reserved_mem_device_init_by_idx(dev, np, 0);
}
ret = copy_dma_range_map(dev, rproc->dev.parent); if (ret) return ret;
/* Make device dma capable by inheriting from parent's capabilities */
set_dma_ops(dev, get_dma_ops(rproc->dev.parent));
ret = dma_coerce_mask_and_coherent(dev, dma_get_mask(rproc->dev.parent)); if (ret) {
/* Non-fatal: log and continue with whatever mask is in effect. */
dev_warn(dev, "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
}
/*
 * NOTE(review): from here down, `i`, `rsc` and `rvdev_data` are undeclared
 * in this view — these lines likely belong to a different (resource-table
 * parsing) function that was spliced into this one during extraction.
 */
/* parse the vrings */ for (i = 0; i < rsc->num_of_vrings; i++) {
ret = rproc_parse_vring(rvdev, rsc, i); if (ret) return ret;
}
/* remember the resource offset*/
rvdev->rsc_offset = rvdev_data->rsc_offset;
/* allocate the vring resources */ for (i = 0; i < rsc->num_of_vrings; i++) {
ret = rproc_alloc_vring(rvdev, i); if (ret) goto unwind_vring_allocations;
}
/* * We're indirectly making a non-temporary copy of the rproc pointer * here, because the platform device or the vdev device will indirectly * access the wrapping rproc. * * Therefore we must increment the rproc refcount here, and decrement * it _only_ on platform remove.
*/
get_device(&rproc->dev);
return 0;
/* Error unwind: free the vrings allocated so far, newest first. */
unwind_vring_allocations: for (i--; i >= 0; i--)
rproc_free_vring(&rvdev->vring[i]);
/*
 * NOTE(review): truncated — the `out:` label targeted by the gotos above,
 * the final error return, and the closing brace are missing from this chunk.
 */
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.