/** * intel_iommu_drain_pasid_prq - Drain page requests and responses for a pasid * @dev: target device * @pasid: pasid for draining * * Drain all pending page requests and responses related to @pasid in both * software and hardware. This is supposed to be called after the device * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB * and DevTLB have been invalidated. * * It waits until all pending page requests for @pasid in the page fault * queue are completed by the prq handling thread. Then follow the steps * described in VT-d spec CH7.10 to drain all page requests and page * responses pending in the hardware.
*/ void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid)
{ struct device_domain_info *info; struct dmar_domain *domain; struct intel_iommu *iommu; struct qi_desc desc[3]; int head, tail;
u16 sid, did;
info = dev_iommu_priv_get(dev); if (!info->iopf_refcount) return;
/* * Check and wait until all pending page requests in the queue are * handled by the prq handling thread.
*/
prq_retry:
reinit_completion(&iommu->prq_complete);
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; while (head != tail) { struct page_req_dsc *req;
/*
 * prq_to_iommu_prot - translate the access-type bits of a page request
 * descriptor into IOMMU_FAULT_PERM_* permission flags.
 * @req: page request descriptor taken from the page request queue
 *
 * Return: OR'ed set of the permission flags the faulting device asked
 * for (read / write / execute / privileged-mode access).
 */
static int prq_to_iommu_prot(struct page_req_dsc *req)
{
	int prot = 0;

	if (req->rd_req)
		prot |= IOMMU_FAULT_PERM_READ;
	if (req->wr_req)
		prot |= IOMMU_FAULT_PERM_WRITE;
	if (req->exe_req)
		prot |= IOMMU_FAULT_PERM_EXEC;
	if (req->pm_req)
		prot |= IOMMU_FAULT_PERM_PRIV;

	/* NOTE(review): original listing was cut off here; the missing
	 * "return prot;" and closing brace are restored — the function
	 * only accumulates flags, so no other completion is possible.
	 */
	return prot;
}
/* * Clear PPR bit before reading head/tail registers, to ensure that * we get a new interrupt if needed.
*/
writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
if (unlikely(!is_canonical_address(address))) {
pr_err("IOMMU: %s: Address is not canonical\n",
iommu->name);
bad_req:
handle_bad_prq_event(iommu, req, QI_RESP_INVALID); goto prq_advance;
}
if (unlikely(req->pm_req && (req->rd_req | req->wr_req))) {
pr_err("IOMMU: %s: Page request in Privilege Mode\n",
iommu->name); goto bad_req;
}
if (unlikely(req->exe_req && req->rd_req)) {
pr_err("IOMMU: %s: Execution request not supported\n",
iommu->name); goto bad_req;
}
/* Drop Stop Marker message. No need for a response. */ if (unlikely(req->lpig && !req->rd_req && !req->wr_req)) goto prq_advance;
/* * If prq is to be handled outside iommu driver via receiver of * the fault notifiers, we skip the page response here.
*/
mutex_lock(&iommu->iopf_lock);
dev = device_rbtree_find(iommu, req->rid); if (!dev) {
mutex_unlock(&iommu->iopf_lock); goto bad_req;
}
/* * Clear the page request overflow bit and wake up all threads that * are waiting for the completion of this handling.
*/ if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
pr_info_ratelimited("IOMMU: %s: PRQ overflow detected\n",
iommu->name);
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; if (head == tail) {
iopf_queue_discard_partial(iommu->iopf_queue);
writel(DMA_PRS_PRO, iommu->reg + DMAR_PRS_REG);
pr_info_ratelimited("IOMMU: %s: PRQ overflow cleared",
iommu->name);
}
}
if (!completion_done(&iommu->prq_complete))
complete(&iommu->prq_complete);
return IRQ_RETVAL(handled);
}
int intel_iommu_enable_prq(struct intel_iommu *iommu)
{ struct iopf_queue *iopfq; int irq, ret;
iommu->prq =
iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE); if (!iommu->prq) {
pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
iommu->name); return -ENOMEM;
}
irq = dmar_alloc_hwirq(IOMMU_IRQ_ID_OFFSET_PRQ + iommu->seq_id, iommu->node, iommu); if (irq <= 0) {
pr_err("IOMMU: %s: Failed to create IRQ vector for page request queue\n",
iommu->name);
ret = -EINVAL; goto free_prq;
}
iommu->pr_irq = irq;
snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name), "dmar%d-iopfq", iommu->seq_id);
iopfq = iopf_queue_alloc(iommu->iopfq_name); if (!iopfq) {
pr_err("IOMMU: %s: Failed to allocate iopf queue\n", iommu->name);
ret = -ENOMEM; goto free_hwirq;
}
iommu->iopf_queue = iopfq;
/*
 * NOTE(review): the following text is website boilerplate (a German
 * disclaimer) accidentally captured together with this source listing;
 * it is not part of the code. Preserved here, translated:
 *
 * "The information on this website has been carefully compiled to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */