/* Page sizes supported by the hardware and small enough for @size */
pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);
/* Constrain the page sizes further based on the maximum alignment */ if (likely(addr_merge))
pgsizes &= GENMASK(__ffs(addr_merge), 0);
/* Make sure we have at least one suitable page size */
BUG_ON(!pgsizes);
/* Pick the biggest page size remaining */
pgsize_idx = __fls(pgsizes);
pgsize = BIT(pgsize_idx); if (!count) return pgsize;
/* Find the next biggest support page size, if it exists */
pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0); if (!pgsizes) goto out_set_count;
/* * There's no point trying a bigger page size unless the virtual * and physical addresses are similarly offset within the larger page.
*/ if ((iova ^ paddr) & (pgsize_next - 1)) goto out_set_count;
/* Calculate the offset to the next page size alignment boundary */
offset = pgsize_next - (addr_merge & (pgsize_next - 1));
/* * If size is big enough to accommodate the larger page, reduce * the number of smaller pages.
*/ if (offset + pgsize_next <= size)
size = offset;
unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL); if (unmapped <= 0) {
ret = -EINVAL; /* * Continue attempting to unmap the remainder of the * range, so we don't end up with some dangling * mapped pages
*/
unmapped = PAGE_SIZE;
}
ret = ops->map_pages(ops, addr, phys, pgsize, count,
prot, GFP_KERNEL, &mapped);
/* map_pages could fail after mapping some of the pages, * so update the counters before error handling.
*/
phys += mapped;
addr += mapped;
size -= mapped;
len -= mapped;
/* * If this is the last attached pagetable for the parent, * disable TTBR0 in the arm-smmu driver
*/
mutex_lock(&iommu->init_lock); if (--iommu->pagetables == 0) {
adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);
/**
 * msm_iommu_pagetable_alloc_pt() - Custom page table allocator
 * @cookie: Cookie passed at page table allocation time.
 * @size: Size of the page table. This size should be fixed,
 * and determined at creation time based on the granule size.
 * @gfp: GFP flags.
 *
 * We want a custom allocator so we can use a cache for page table
 * allocations and amortize the cost of the over-reservation that's
 * done to allow asynchronous VM operations.
 *
 * Return: non-NULL on success, NULL if the allocation failed for any
 * reason.
 */
static void *
msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct msm_mmu_prealloc *p = pagetable->base.prealloc;
	void *page;

	/* Allocation of the root page table happening during init. */
	if (unlikely(!pagetable->root_page_table)) {
		struct page *p;

		p = alloc_pages_node(dev_to_node(pagetable->iommu_dev),
				     gfp | __GFP_ZERO, get_order(size));
		page = p ? page_address(p) : NULL;
		pagetable->root_page_table = page;
		return page;
	}

	/* Non-root tables must come from the preallocated pool. */
	if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count))
		return NULL;

	page = p->pages[p->ptr++];
	memset(page, 0, size);

	/*
	 * Page table entries don't use virtual addresses, which trips out
	 * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
	 * are mixed with other fields, and I fear kmemleak won't detect that
	 * either.
	 *
	 * Let's just ignore memory passed to the page-table driver for now.
	 */
	kmemleak_ignore(page);

	return page;
}
/** * free_pt() - Custom page table free function * @cookie: Cookie passed at page table allocation time. * @data: Page table to free. * @size: Size of the page table. This size should be fixed, * and determined at creation time based on the granule size.
*/ static void
msm_iommu_pagetable_free_pt(void *cookie, void *data, size_t size)
{ struct msm_iommu_pagetable *pagetable = cookie;
/* Get the pagetable configuration from the domain */ if (adreno_smmu->cookie)
ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
/* * If you hit this WARN_ONCE() you are probably missing an entry in * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
*/ if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables")) return ERR_PTR(-ENODEV);
pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL); if (!pagetable) return ERR_PTR(-ENOMEM);
/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
ttbr0_cfg = *ttbr1_cfg;
/* The incoming cfg will have the TTBR1 quirk enabled */
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
ttbr0_cfg.tlb = &tlb_ops;
if (!kernel_managed) {
ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;
/* * With userspace managed VM (aka VM_BIND), we need to pre- * allocate pages ahead of time for map/unmap operations, * handing them to io-pgtable via custom alloc/free ops as * needed:
*/
ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt;
ttbr0_cfg.free = msm_iommu_pagetable_free_pt;
/* * Restrict to single page granules. Otherwise we may run * into a situation where userspace wants to unmap/remap * only a part of a larger block mapping, which is not * possible without unmapping the entire block. Which in * turn could cause faults if the GPU is accessing other * parts of the block mapping. * * Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm: * Remove split on unmap behavior)" this was handled in * io-pgtable-arm. But this apparently does not work * correctly on SMMUv3.
*/
WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE));
ttbr0_cfg.pgsize_bitmap = PAGE_SIZE;
}
if (!pagetable->pgtbl_ops) {
kfree(pagetable); return ERR_PTR(-ENOMEM);
}
/* * If this is the first pagetable that we've allocated, send it back to * the arm-smmu driver as a trigger to set up TTBR0
*/
mutex_lock(&iommu->init_lock); if (iommu->pagetables++ == 0) {
ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg); if (ret) {
iommu->pagetables--;
mutex_unlock(&iommu->init_lock);
free_io_pgtable_ops(pagetable->pgtbl_ops);
kfree(pagetable); return ERR_PTR(ret);
}
BUG_ON(iommu->prr_page); if (adreno_smmu->set_prr_bit) { /* * We need a zero'd page for two reasons: * * 1) Reserve a known physical address to use when * mapping NULL / sparsely resident regions * 2) Read back zero * * It appears the hw drops writes to the PRR region * on the floor, but reads actually return whatever * is in the PRR page.
*/
iommu->prr_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
adreno_smmu->set_prr_addr(adreno_smmu->cookie,
page_to_phys(iommu->prr_page));
adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
}
}
mutex_unlock(&iommu->init_lock);
/* Needed later for TLB flush */
pagetable->parent = parent;
pagetable->tlb = ttbr1_cfg->tlb;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
/* * TODO we would like each set of page tables to have a unique ASID * to optimize TLB invalidation. But iommu_flush_iotlb_all() will * end up flushing the ASID used for TTBR1 pagetables, which is not * what we want. So for now just use the same ASID as TTBR1.
*/
pagetable->asid = 0;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.