/*
 * Large page/supersection entries are effectively a block of 16 page/section
 * entries, along the lines of the LPAE contiguous hint, but all with the
 * same output address. For want of a better common name we'll call them
 * "contiguous" versions of their respective page/section entries here, but
 * noting the distinction (WRT to TLB maintenance) that they represent *one*
 * entry repeated 16 times, not 16 separate entries (as in the LPAE case).
 */
#define ARM_V7S_CONT_PAGES		16

/* PTE type bits: these are all mixed up with XN/PXN bits in most cases */
#define ARM_V7S_PTE_TYPE_TABLE		0x1
#define ARM_V7S_PTE_TYPE_PAGE		0x2
#define ARM_V7S_PTE_TYPE_CONT_PAGE	0x1

/*
 * The attribute bits are consistently ordered*, but occupy bits [17:10] of
 * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual
 * fields relative to that 8-bit block, plus a total shift relative to the PTE.
 */
/* Shift of the 8-bit attribute block within a PTE: 10 at level 1, 4 at level 2 */
#define ARM_V7S_ATTR_SHIFT(lvl)		(16 - (lvl) * 6)
/*
 * NOTE(review): incomplete fragment — this appears to be the tail of a
 * page-table allocation helper; its start (and the out_free/out_unmap
 * labels it jumps to) lies outside the visible chunk. Only the mangled
 * line structure has been restored here; no logic was changed.
 */
phys = virt_to_phys(table);
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
    phys >= (1ULL << cfg->oas) : phys != (arm_v7s_iopte)phys) {
	/* Doesn't fit in PTE */
	dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
	goto out_free;
}
if (!cfg->coherent_walk) {
	dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto out_free;
	/*
	 * We depend on the IOMMU being able to work with any physical
	 * address directly, so if the DMA layer suggests otherwise by
	 * translating or truncating them, that bodes very badly...
	 */
	if (dma != phys)
		goto out_unmap;
}
if (lvl == 2)
	kmemleak_ignore(table);
return table;
/*
 * Install num_entries leaf entries at the given level, first tearing down
 * any existing next-level tables they would overwrite.
 *
 * NOTE(review): incomplete fragment — the function body continues beyond
 * the visible chunk (no closing brace here). Fixes below are limited to
 * the fused tokens "staticint", "unsignedlong" and "elseif", which do not
 * compile, plus restored line structure; no logic was changed.
 */
static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
			    unsigned long iova, phys_addr_t paddr, int prot,
			    int lvl, int num_entries, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte;
	int i;

	for (i = 0; i < num_entries; i++)
		if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_v7s_iopte *tblp;
			size_t sz = ARM_V7S_BLOCK_SIZE(lvl);

			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg);
			if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
						    sz, lvl, tblp) != sz))
				return -EINVAL;
		} else if (ptep[i]) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		}
/*
 * NOTE(review): incomplete fragment — appears to be the middle of a
 * table-PTE installation helper; 'new', 'old', 'curr' and 'phys' are
 * declared outside the visible chunk. Only line structure restored.
 */
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT)
	new = to_mtk_iopte(phys, new);

if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
	new |= ARM_V7S_ATTR_NS_TABLE;

/*
 * Ensure the table itself is visible before its PTE can be.
 * Whilst we could get away with cmpxchg64_release below, this
 * doesn't have any ordering semantics when !CONFIG_SMP.
 */
dma_wmb();
old = cmpxchg_relaxed(ptep, curr, new);
__arm_v7s_pte_sync(ptep, 1, cfg);
/*
 * NOTE(review): incomplete fragment — the middle of a recursive map
 * routine (its signature and the surrounding scope are outside the
 * visible chunk). Fixes are limited to the fused token "elseif" and
 * restored line structure; no logic was changed.
 */
/* Find our entry at the current level */
ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg);

/* If we can install a leaf entry at this level, then do so */
if (num_entries)
	return arm_v7s_init_pte(data, iova, paddr, prot,
				lvl, num_entries, ptep);

/* We can't allocate tables at the final level */
if (WARN_ON(lvl == 2))
	return -EINVAL;

/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
	cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
	if (!cptep)
		return -ENOMEM;

	/* Lost the race to install a table: free ours and use the winner's */
	pte = arm_v7s_install_table(cptep, ptep, 0, cfg);
	if (pte)
		__arm_v7s_free_table(cptep, lvl + 1, data);
} else {
	/* We've no easy way of knowing if it's synced yet, so... */
	__arm_v7s_pte_sync(ptep, 1, cfg);
}

if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
	cptep = iopte_deref(pte, lvl, data);
} else if (pte) {
	/* We require an unmap first */
	WARN_ON(!selftest_running);
	return -EEXIST;
}
/*
 * NOTE(review): incomplete fragment — the body of a multi-page map entry
 * point; 'pgcount', 'pgsize', 'ret', 'mapped' and the enclosing function
 * are outside the visible chunk. Only line structure restored.
 */
if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
	return -EINVAL;

while (pgcount--) {
	ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
			    gfp);
	if (ret)
		break;

	iova += pgsize;
	paddr += pgsize;
	*mapped += pgsize;
}
/*
 * Synchronise all PTE updates for the new mapping before there's
 * a chance for anything to kick off a table walk for the new iova.
 */
wmb();
/*
 * NOTE(review): incomplete fragment — the middle of an unmap routine;
 * 'i', 'idx', 'pte[]' and 'iop' are declared outside the visible chunk,
 * and the final 'if' block is cut off mid-body. Only line structure
 * restored; no logic was changed.
 */
/* Something went horribly wrong and we ran out of page table */
if (WARN_ON(lvl > 2))
	return 0;

idx = ARM_V7S_LVL_IDX(iova, lvl, &iop->cfg);
ptep += idx;
do {
	pte[i] = READ_ONCE(ptep[i]);
	if (WARN_ON(!ARM_V7S_PTE_IS_VALID(pte[i])))
		return 0;
} while (++i < num_entries);

/*
 * If we've hit a contiguous 'large page' entry at this level, it
 * needs splitting first, unless we're unmapping the whole lot.
 *
 * For splitting, we can't rewrite 16 PTEs atomically, and since we
 * can't necessarily assume TEX remap we don't have a software bit to
 * mark live entries being split. In practice (i.e. DMA API code), we
 * will never be splitting large pages anyway, so just wrap this edge
 * case in a lock for the sake of correctness and be done with it.
 */
if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) {
	WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
	return 0;
}

/* If the size matches this level, we're in the right place */
if (num_entries) {
	size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);
/*
 * NOTE(review): incomplete fragment — the interior of a pgtable-allocation
 * constructor; 'data', 'slab_flag', the 'out_free_data' label and several
 * intervening statements are outside the visible chunk. Only line
 * structure restored; no logic was changed.
 */
/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT &&
    !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
	return NULL;

if ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT) &&
    !arm_v7s_is_mtk_enabled(cfg))
	return NULL;

data = kmalloc(sizeof(*data), GFP_KERNEL);
if (!data)
	return NULL;

/*
 * ARM_MTK_TTBR_EXT extend the translation table base support larger
 * memory address.
 */
slab_flag = cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT ?
	    0 : ARM_V7S_TABLE_SLAB_FLAGS;

/* We have to do this early for __arm_v7s_alloc_table to work... */
data->iop.cfg = *cfg;

/*
 * Unless the IOMMU driver indicates supersection support by
 * having SZ_16M set in the initial bitmap, they won't be used.
 */
cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;

/*
 * TEX remap: the indices used map to the closest equivalent types
 * under the non-TEX-remap interpretation of those attribute bits,
 * excepting various implementation-defined aspects of shareability.
 */
cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
			ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
			ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
			ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
			ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
			ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);

/* Looking good; allocate a pgd */
data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
if (!data->pgd)
	goto out_free_data;

/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.