/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;
/*
 * Per device pasid table management:
 */
/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	u32 max_pasid = 0;
	int order, size;

	might_sleep();

	info = dev_iommu_priv_get(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return -ENODEV;
	if (WARN_ON(info->pasid_table))
		return -EEXIST;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);
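	/*
	 * Size the PASID directory: each directory entry is 8 bytes and
	 * covers 2^PASID_PDE_SHIFT PASIDs, hence the shift by
	 * (PASID_PDE_SHIFT - 3) below.
	 */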
	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
					1 << (order + PAGE_SHIFT));
	if (!dir) {
		kfree(pasid_table);
		return -ENOMEM;
	}
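	/*
	 * (Reconstruction: the tail of intel_pasid_alloc_table() was lost in
	 * extraction; the lines below follow the upstream pattern.)
	 */
	pasid_table->table = dir;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
	info->pasid_table = pasid_table;

	return 0;
}

/*
 * (Reconstructed boundary: the fragment below is the body of the per-PASID
 * entry lookup helper, not a continuation of the allocation path. The
 * signature and locals are assumptions based on the surrounding code.)
 */
static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= pasid_table->max_pasid))
		return NULL;
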
	dir = pasid_table->table;
	info = dev_iommu_priv_get(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		u64 tmp;

		entries = iommu_alloc_pages_node_sz(info->iommu->node,
						    GFP_ATOMIC, SZ_4K);
		if (!entries)
			return NULL;

		/*
		 * The pasid directory table entry won't be freed after
		 * allocation. No worry about the race with free and
		 * clear. However, this entry might be populated by others
		 * while we are preparing it. Use theirs with a retry.
		 */
		tmp = 0ULL;
		if (!try_cmpxchg64(&dir[dir_index].val, &tmp,
				   (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			iommu_free_pages(entries);
			goto retry;
		}
		if (!ecap_coherent(info->iommu->ecap)) {
			clflush_cache_range(entries, VTD_PAGE_SIZE);
			clflush_cache_range(&dir[dir_index].val, sizeof(*dir));
		}
	}
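	/* (Reconstructed return, elided in extraction.) */
	return &entries[index];
}

/*
 * (Reconstructed opening: the flush selection below belongs to the device
 * TLB invalidation helper; its prologue computing sid/qdep/pfsid was lost
 * in extraction and is an assumption following the upstream pattern.)
 */
static void devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
					   struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->ats_enabled)
		return;

	if (pci_dev_is_disconnected(to_pci_dev(dev)))
		return;

	sid = PCI_DEVID(info->bus, info->devfn);
	qdep = info->ats_qdep;
	pfsid = info->pfsid;
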
	/*
	 * When PASID 0 is used, it indicates RID2PASID (a DMA request w/o
	 * PASID), so a devTLB flush w/o PASID should be used. For a non-zero
	 * PASID under SVA usage, the device could do DMA with multiple
	 * PASIDs; it is more efficient to flush the devTLB specific to the
	 * PASID.
	 */
	if (pasid == IOMMU_NO_PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0,
				   64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0,
					 64 - VTD_PAGE_SHIFT);
}
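/*
 * (Reconstructed opening of the tear-down path: the signature and the
 * entry lookup were lost in extraction and follow the upstream pattern.)
 */
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return;
	}
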
	if (!pasid_pte_is_present(pte)) {
		if (!pasid_pte_is_fault_disabled(pte)) {
			WARN_ON(READ_ONCE(pte->val[0]) != 0);
			spin_unlock(&iommu->lock);
			return;
		}

		/*
		 * When a PASID is used for SVA by a device, it's possible
		 * that the pasid entry is non-present with the Fault
		 * Processing Disabled bit set. Clear the pasid entry and
		 * drain the PRQ for the PASID before return.
		 */
		pasid_clear_entry(pte);
		spin_unlock(&iommu->lock);
		intel_iommu_drain_pasid_prq(dev, pasid);
		return;
	}
	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));
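	/*
	 * (Reconstruction: the PASID-cache and IOTLB invalidations performed
	 * between the cacheline flush and the devTLB invalidation were lost
	 * in extraction; without them @did and @pgtt would be unused.)
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
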
	devtlb_invalidation_with_pasid(iommu, dev, pasid);
	if (!fault_ignore)
		intel_iommu_drain_pasid_prq(dev, pasid);
}
/*
 * This function flushes the cache for a newly set up pasid table entry.
 * Callers should not modify in-use pasid table entries.
 */
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));
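	/*
	 * (Reconstruction of the elided remainder: with caching mode the
	 * PASID cache and IOTLB must be invalidated explicitly; otherwise
	 * flushing the write buffer is sufficient.)
	 */
	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}
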
/*
 * This function is supposed to be used after the caller updates the fields
 * of a pasid table entry, except for the SSADE and P bits. It does the
 * following:
 * - Flush the cacheline if needed
 * - Flush the caches per Table 28 "Guidance to Software for Invalidations"
 *   of the VT-d spec 5.0.
 */
static void intel_pasid_flush_present(struct intel_iommu *iommu,
				      struct device *dev,
				      u32 pasid, u16 did,
				      struct pasid_entry *pte)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));
	/*
	 * VT-d spec 5.0, Table 28, gives the following guidance for cache
	 * invalidations:
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 * - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
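	/* (Reconstructed tail: the devTLB invalidation called for above.) */
	devtlb_invalidation_with_pasid(iommu, dev, pasid);
}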
/*
 * Set up the scalable mode pasid table entry for first only
 * translation type.
 */
static void pasid_pte_config_first_level(struct intel_iommu *iommu,
					 struct pasid_entry *pte,
					 phys_addr_t fsptptr, u16 did,
					 int flags)
{
	lockdep_assert_held(&iommu->lock);

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, fsptptr);

	if (flags & PASID_FLAG_FL5LP)
		pasid_set_flpm(pte, 1);

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);
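	/*
	 * (Reconstruction: the domain ID, address width and page snoop
	 * setup were lost in extraction; without them @did would be unused.)
	 */
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
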
	/* Setup Present and PASID Granular Transfer Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
}
int intel_pasid_setup_first_level(struct intel_iommu *iommu, struct device *dev,
				  phys_addr_t fsptptr, u32 pasid, u16 did,
				  int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
		pr_err("No 5-level paging support for first-level on %s\n",
		       iommu->name);
		return -EINVAL;
	}
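	/*
	 * (Reconstruction of the elided remainder: program the entry under
	 * the lock, then flush the caches for the newly set up entry.)
	 */
	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}
	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_pte_config_first_level(iommu, pte, fsptptr, did, flags);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}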
/*
 * Set up the scalable mode pasid entry for second only translation type.
 */
static void pasid_pte_config_second_level(struct intel_iommu *iommu,
					  struct pasid_entry *pte,
					  u64 pgd_val, int agaw, u16 did,
					  bool dirty_tracking)
{
	lockdep_assert_held(&iommu->lock);
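	/*
	 * (Reconstruction of the elided helper body, following the pattern of
	 * the first-level config helper above.)
	 */
	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));
	if (dirty_tracking)
		pasid_set_ssade(pte);

	pasid_set_present(pte);
}

/*
 * (Reconstructed boundary: the capability check below belongs to the
 * caller, intel_pasid_setup_second_level(); its opening was lost in
 * extraction.)
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	u16 did;
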
	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	pgd_val = virt_to_phys(pgd);
	did = domain_id_iommu(domain, iommu);
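	/*
	 * (Reconstruction of the elided remainder, mirroring the first-level
	 * setup path; domain->agaw and domain->dirty_tracking as config
	 * inputs are assumptions.)
	 */
	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}
	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_pte_config_second_level(iommu, pte, pgd_val, domain->agaw,
				      did, domain->dirty_tracking);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}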
/*
 * Set up dirty tracking on a second only or nested translation type.
 */
int intel_pasid_setup_dirty_tracking(struct intel_iommu *iommu,
				     struct device *dev, u32 pasid,
				     bool enabled)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);

	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		dev_err_ratelimited(dev,
				    "Failed to get pasid entry of PASID %d\n",
				    pasid);
		return -ENODEV;
	}

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	if (pgtt != PASID_ENTRY_PGTT_SL_ONLY &&
	    pgtt != PASID_ENTRY_PGTT_NESTED) {
		spin_unlock(&iommu->lock);
		dev_err_ratelimited(dev,
				    "Dirty tracking not supported on translation type %d\n",
				    pgtt);
		return -EOPNOTSUPP;
	}

	if (pasid_get_ssade(pte) == enabled) {
		spin_unlock(&iommu->lock);
		return 0;
	}

	if (enabled)
		pasid_set_ssade(pte);
	else
		pasid_clear_ssade(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/*
	 * From VT-d spec table 25 "Guidance to Software for Invalidations":
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 *   If (PGTT=SS or Nested)
	 *    - Domain-selective IOTLB invalidation
	 *   Else
	 *    - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
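	/*
	 * (Reconstruction of the elided tail: the IOTLB and device-TLB
	 * invalidations the guidance above calls for.)
	 */
	iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
	devtlb_invalidation_with_pasid(iommu, dev, pasid);

	return 0;
}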
/*
 * Set the page snoop control for a pasid entry which has been set up.
 */
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	u16 did;
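	/*
	 * (Reconstruction of the elided body: set PGSNP under the lock, then
	 * flush via the present-entry helper above.)
	 */
	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
		spin_unlock(&iommu->lock);
		return;
	}

	pasid_set_pgsnp(pte);
	did = pasid_get_domain_id(pte);
	spin_unlock(&iommu->lock);

	intel_pasid_flush_present(iommu, dev, pasid, did, pte);
}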
/**
 * intel_pasid_setup_nested() - Set up PASID entry for nested translation.
 * @iommu: IOMMU which the device belong to
 * @dev: Device to be set up for translation
 * @pasid: PASID to be programmed in the device PASID table
 * @domain: User stage-1 domain nested on a stage-2 domain
 *
 * This is used for nested translation. The input domain should be
 * nested type and nested on a parent with 'is_nested_parent' flag
 * set.
 */
int intel_pasid_setup_nested(struct intel_iommu *iommu, struct device *dev,
			     u32 pasid, struct dmar_domain *domain)
{
	struct iommu_hwpt_vtd_s1 *s1_cfg = &domain->s1_cfg;
	struct dmar_domain *s2_domain = domain->s2_domain;
	u16 did = domain_id_iommu(domain, iommu);
	struct pasid_entry *pte;

	/* Address width should match the address width supported by hardware */
	switch (s1_cfg->addr_width) {
	case ADDR_WIDTH_4LEVEL:
		break;
	case ADDR_WIDTH_5LEVEL:
		if (!cap_fl5lp_support(iommu->cap)) {
			dev_err_ratelimited(dev,
					    "5-level paging not supported\n");
			return -EINVAL;
		}
		break;
	default:
		dev_err_ratelimited(dev, "Invalid stage-1 address width %d\n",
				    s1_cfg->addr_width);
		return -EINVAL;
	}

	if ((s1_cfg->flags & IOMMU_VTD_S1_SRE) && !ecap_srs(iommu->ecap)) {
		pr_err_ratelimited("No supervisor request support on %s\n",
				   iommu->name);
		return -EINVAL;
	}

	if ((s1_cfg->flags & IOMMU_VTD_S1_EAFE) && !ecap_eafs(iommu->ecap)) {
		pr_err_ratelimited("No extended access flag support on %s\n",
				   iommu->name);
		return -EINVAL;
	}

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}
	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}
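	/*
	 * (Reconstruction of the elided tail; the nested-entry config helper
	 * name is an assumption following the naming of the other config
	 * helpers in this file.)
	 */
	pasid_pte_config_nested(iommu, pte, s1_cfg, s2_domain, did);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}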
/*
 * Get the PASID directory size for scalable mode context entry.
 * Value of X in the PDTS field of a scalable mode context entry
 * indicates PASID directory with 2^(X + 7) entries.
 */
static unsigned long context_get_sm_pds(struct pasid_table *table)
{
	unsigned long pds, max_pde;
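	/*
	 * (Reconstruction of the elided body: compute X such that the
	 * directory holds 2^(X + 7) entries, clamped at 0.)
	 */
	max_pde = table->max_pasid >> PASID_PDE_SHIFT;
	pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
	if (pds < 7)
		return 0;

	return pds - 7;
}

/*
 * (Reconstructed boundary: the capability bits below are programmed in the
 * scalable mode context entry setup helper; its opening was lost in
 * extraction, and the locals below are assumptions.)
 */
static int context_entry_set_pasid_table(struct context_entry *context,
					 struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct pasid_table *table = info->pasid_table;
	struct intel_iommu *iommu = info->iommu;
	unsigned long pds;

	context_clear_entry(context);

	pds = context_get_sm_pds(table);
	context->lo = (u64)virt_to_phys(table->table) | context_pdts(pds);
	context_set_sm_rid2pasid(context, IOMMU_NO_PASID);
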
	if (info->ats_supported)
		context_set_sm_dte(context);
	if (info->pasid_supported)
		context_set_pasid(context);
	if (info->pri_supported)
		context_set_sm_pre(context);
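	/*
	 * (Reconstructed tail and boundary: mark the context entry present
	 * and flush it, then begin the per-RID table setup helper whose
	 * kdump handling follows. Names and locals follow the upstream
	 * pattern and are assumptions.)
	 */
	context_set_fault_enable(context);
	context_set_present(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));

	return 0;
}

static int device_pasid_table_setup(struct device *dev, u8 bus, u8 devfn)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);
	struct intel_iommu *iommu = info->iommu;
	struct context_entry *context;

	spin_lock(&iommu->lock);
	context = iommu_context_addr(iommu, bus, devfn, true);
	if (!context) {
		spin_unlock(&iommu->lock);
		return -ENOMEM;
	}
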
	if (context_copied(iommu, bus, devfn)) {
		context_clear_entry(context);
		__iommu_flush_cache(iommu, context, sizeof(*context));

		/*
		 * For kdump cases, old valid entries may be cached due to
		 * the in-flight DMA and copied pgtable, but there is no
		 * unmapping behaviour for them, thus we need explicit cache
		 * flushes for all affected domain IDs and PASIDs used in
		 * the copied PASID table. Given that we have no idea about
		 * which domain IDs and PASIDs were used in the copied tables,
		 * upgrade them to global PASID and IOTLB cache invalidation.
		 */
		iommu->flush.flush_context(iommu, 0,
					   PCI_DEVID(bus, devfn),
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		qi_flush_pasid_cache(iommu, 0, QI_PC_GLOBAL, 0);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
		devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);

		/*
		 * At this point, the device is supposed to finish reset at
		 * its driver probe stage, so no in-flight DMA will exist,
		 * and we don't need to worry anymore hereafter.
		 */
		clear_context_copied(iommu, bus, devfn);
	}
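	/*
	 * (Reconstruction: program the context entry and drop the lock
	 * before flushing, following the upstream flow.)
	 */
	context_entry_set_pasid_table(context, dev);
	spin_unlock(&iommu->lock);
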
	/*
	 * It's a non-present to present mapping. If hardware doesn't cache
	 * non-present entries, we don't need to flush the caches. If it does
	 * cache non-present entries, then it does so in the special
	 * domain #0, which we have to flush:
	 */
	if (cap_caching_mode(iommu->cap)) {
		iommu->flush.flush_context(iommu, 0,
					   PCI_DEVID(bus, devfn),
					   DMA_CCMD_MASK_NOBIT,
					   DMA_CCMD_DEVICE_INVL);
		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH);
	}
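	return 0;
}

/*
 * (Reconstructed boundary: the call below is the tail of the DMA-alias walk
 * callback; its signature follows the pci_for_each_dma_alias() convention
 * and is an assumption.)
 */
static int pci_pasid_table_setup(struct pci_dev *pdev, u16 alias, void *data)
{
	struct device *dev = data;

	if (dev != &pdev->dev)
		return 0;
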
	return device_pasid_table_setup(dev, PCI_BUS_NUM(alias), alias & 0xff);
}
/*
 * Set the device's PASID table to its context table entry.
 *
 * The PASID table is set to the context entries of both device itself
 * and its alias requester ID for DMA.
 */
int intel_pasid_setup_sm_context(struct device *dev)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);

	if (!dev_is_pci(dev))
		return device_pasid_table_setup(dev, info->bus, info->devfn);
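	/* (Reconstructed tail: apply the setup to every DMA alias.) */
	return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup,
				      dev);
}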
/*
 * Global Device-TLB invalidation following changes in a context entry which
 * was present.
 */
static void __context_flush_dev_iotlb(struct device_domain_info *info)
{
	if (!info->ats_enabled)
		return;
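	/*
	 * (Reconstruction: the global device-TLB invalidation itself was
	 * elided in extraction.)
	 */
	qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn),
			   info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH);
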
	/*
	 * There is no guarantee that the device DMA is stopped when it reaches
	 * here. Therefore, always attempt the extra device TLB invalidation
	 * quirk. The impact on performance is acceptable since this is not a
	 * performance-critical path.
	 */
	quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH, IOMMU_NO_PASID,
				  info->ats_qdep);
}
/*
 * Cache invalidations after change in a context table entry that was present
 * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations).
 * This helper can only be used when IOMMU is working in the legacy mode or
 * IOMMU is in scalable mode but all PASID table entries of the device are
 * non-present.
 */
void intel_context_flush_no_pasid(struct device_domain_info *info,
				  struct context_entry *context, u16 did)
{
	struct intel_iommu *iommu = info->iommu;

	/*
	 * Device-selective context-cache invalidation. The Domain-ID field
	 * of the Context-cache Invalidate Descriptor is ignored by hardware
	 * when operating in scalable mode. Therefore the @did value doesn't
	 * matter in scalable mode.
	 */
	iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn),
				   DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);

	/*
	 * For legacy mode:
	 * - Domain-selective IOTLB invalidation
	 * - Global Device-TLB invalidation to all affected functions
	 */
	if (!sm_supported(iommu)) {
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
		__context_flush_dev_iotlb(info);
		return;
	}

	__context_flush_dev_iotlb(info);
}