if (unmask)
vfio_platform_unmask(&vdev->irqs[index]);
}
return 0;
}
/*
 * The trigger eventfd is guaranteed valid in the interrupt path
 * and protected by the igate mutex when triggered via ioctl.
 */
static void vfio_send_eventfd(struct vfio_platform_irq *irq_ctx)
{
	/* trigger may legitimately be NULL when no eventfd is wired up */
	if (likely(irq_ctx->trigger))
		eventfd_signal(irq_ctx->trigger);
}
static irqreturn_t vfio_automasked_irq_handler(int irq, void *dev_id)
{ struct vfio_platform_irq *irq_ctx = dev_id; unsignedlong flags; int ret = IRQ_NONE;
/*
 * Install (or tear down) the trigger eventfd for the IRQ at @index.
 *
 * Any previously installed trigger is released first, with the hw IRQ
 * disabled so the handler cannot signal a stale eventfd.  A negative
 * @fd means "disable only".  Returns 0 on success or a negative errno.
 */
static int vfio_set_trigger(struct vfio_platform_device *vdev, int index,
			    int fd)
{
	struct vfio_platform_irq *irq = &vdev->irqs[index];
	struct eventfd_ctx *trigger;

	if (irq->trigger) {
		/* Quiesce the line before dropping the old eventfd ref. */
		disable_irq(irq->hwirq);
		eventfd_ctx_put(irq->trigger);
		irq->trigger = NULL;
	}

	if (fd < 0) /* Disable only */
		return 0;

	trigger = eventfd_ctx_fdget(fd);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	irq->trigger = trigger;

	/*
	 * irq->masked effectively provides nested disables within the overall
	 * enable relative to trigger.  Specifically request_irq() is called
	 * with NO_AUTOEN, therefore the IRQ is initially disabled.  The user
	 * may only further disable the IRQ with a MASK operations because
	 * irq->masked is initially false.
	 */
	enable_irq(irq->hwirq);

	return 0;
}
/* * For compatibility, errors from request_irq() are local to the * SET_IRQS path and reflected in the name pointer. This allows, * for example, polling mode fallback for an exclusive IRQ failure.
*/ if (IS_ERR(vdev->irqs[index].name)) return PTR_ERR(vdev->irqs[index].name);
switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { case VFIO_IRQ_SET_ACTION_MASK:
func = vfio_platform_set_irq_mask; break; case VFIO_IRQ_SET_ACTION_UNMASK:
func = vfio_platform_set_irq_unmask; break; case VFIO_IRQ_SET_ACTION_TRIGGER:
func = vfio_platform_set_irq_trigger; break;
}
ret = request_irq(hwirq, handler, IRQF_NO_AUTOEN,
vdev->irqs[i].name, &vdev->irqs[i]); if (ret) {
kfree(vdev->irqs[i].name);
vdev->irqs[i].name = ERR_PTR(ret);
}
}
vdev->num_irqs = cnt;
return 0;
err: for (--i; i >= 0; i--) { if (!IS_ERR(vdev->irqs[i].name)) {
free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]);
kfree(vdev->irqs[i].name);
}
}
kfree(vdev->irqs); return ret;
}
void vfio_platform_irq_cleanup(struct vfio_platform_device *vdev)
{ int i;
for (i = 0; i < vdev->num_irqs; i++) {
vfio_virqfd_disable(&vdev->irqs[i].mask);
vfio_virqfd_disable(&vdev->irqs[i].unmask); if (!IS_ERR(vdev->irqs[i].name)) {
free_irq(vdev->irqs[i].hwirq, &vdev->irqs[i]); if (vdev->irqs[i].trigger)
eventfd_ctx_put(vdev->irqs[i].trigger);
kfree(vdev->irqs[i].name);
}
}
vdev->num_irqs = 0;
kfree(vdev->irqs);
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.11 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.