for (i = 0; i < 8; i++) {
u8 in_attr = mair >> (8 * i), out_attr;
u8 outer = in_attr >> 4, inner = in_attr & 0xf;
/* For caching to be enabled, inner and outer caching policy * have to be both write-back, if one of them is write-through * or non-cacheable, we just choose non-cacheable. Device * memory is also translated to non-cacheable.
*/ if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
} else {
out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2); /* Use SH_MIDGARD_INNER mode when device isn't coherent, * so SH_IS, which is used when IOMMU_CACHE is set, maps * to Mali's internal-shareable mode. As per the Mali * Spec, inner and outer-shareable modes aren't allowed * for WB memory when coherency is disabled. * Use SH_CPU_INNER mode when coherency is enabled, so * that SH_IS actually maps to the standard definition of * inner-shareable.
*/ if (!coherent)
out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER; else
out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
}
/* Wait for the MMU status to indicate there is no active command, in
* case one is pending. */
ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000);
if (ret) { /* The GPU hung, let's trigger a reset */
panfrost_device_schedule_reset(pfdev);
dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
}
/* write AS_COMMAND when MMU is ready to accept another command */
status = wait_ready(pfdev, as_nr); if (!status)
mmu_write(pfdev, AS_COMMAND(as_nr), cmd);
/* * The locked region is a naturally aligned power of 2 block encoded as * log2 minus(1). * Calculate the desired start/end and look for the highest bit which * differs. The smallest naturally aligned block must include this bit * change, the desired region starts with this bit (and subsequent bits) * zeroed and ends with the bit (and subsequent bits) set to one.
*/
region_width = max(fls64(region_start ^ (region_end - 1)),
const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
/* * Mask off the low bits of region_start (which would be ignored by * the hardware anyway)
*/
region_start &= GENMASK_ULL(63, region_width);
region = region_width | region_start;
/* Lock the region that needs to be updated */
mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
/*
 * Issue an MMU command @op on address space @as_nr, covering the range
 * [@iova, @iova + @size).
 *
 * For every operation except UNLOCK, the target region is locked first
 * via lock_region(), since the hardware applies flush-type commands to
 * the currently locked region.
 *
 * Returns 0 on success (including the no-op case where no AS is
 * assigned, i.e. @as_nr < 0), or the error from wait_ready() if the
 * MMU did not signal completion.
 *
 * NOTE(review): the "_locked" suffix suggests the caller must hold the
 * relevant AS lock — confirm against the call sites.
 */
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, u64 size, u32 op)
{
	/* Context has no hardware address space assigned: nothing to do. */
	if (as_nr < 0)
		return 0;

	/* Lock the region first; UNLOCK is the only command that
	 * operates without (re)locking.
	 */
	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}
/* Need to revisit mem attrs. * NC is the default, Mali driver is inner WT.
*/
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
/* TODO: The following fields are duplicated between the MMU and Page * Table config structs. Ideally, should be kept in one place.
*/
mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;
switch (fmt) { case ARM_64_LPAE_S1: return mmu_cfg_init_aarch64_4k(mmu); case ARM_MALI_LPAE: return mmu_cfg_init_mali_lpae(mmu); default: /* This should never happen */
drm_WARN(pfdev->ddev, 1, "Invalid pgtable format"); return -EINVAL;
}
}
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{ int as;
spin_lock(&pfdev->as_lock);
as = mmu->as; if (as >= 0) { int en = atomic_inc_return(&mmu->as_count);
u32 mask = BIT(as) | BIT(16 + as);
/* * AS can be retained by active jobs or a perfcnt context, * hence the '+ 1' here.
*/
WARN_ON(en >= (NUM_JOB_SLOTS + 1));
list_move(&mmu->list, &pfdev->as_lru_list);
if (pfdev->as_faulty_mask & mask) { /* Unhandled pagefault on this AS, the MMU was * disabled. We need to re-enable the MMU after * clearing+unmasking the AS interrupts.
*/
mmu_write(pfdev, MMU_INT_CLEAR, mask);
mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
pfdev->as_faulty_mask &= ~mask;
panfrost_mmu_enable(pfdev, mmu);
}
goto out;
}
/* Check for a free AS */
as = ffz(pfdev->as_alloc_mask); if (!(BIT(as) & pfdev->features.as_present)) { struct panfrost_mmu *lru_mmu;
/* Assign the free or reclaimed AS to the FD */
mmu->as = as;
set_bit(as, &pfdev->as_alloc_mask);
atomic_set(&mmu->as_count, 1);
list_add(&mmu->list, &pfdev->as_lru_list);
dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{ /* * io-pgtable only operates on multiple pages within a single table * entry, so we need to split at boundaries of the table size, i.e. * the next block size up. The distance from address A to the next * boundary of block size B is logically B - A % B, but in unsigned * two's complement where B is a power of two we get the equivalence * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
*/
size_t blk_offset = -addr % SZ_2M;
bomapping = addr_to_mapping(pfdev, as, addr); if (!bomapping) return -ENOENT;
bo = bomapping->obj; if (!bo->is_heap) {
dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
bomapping->mmnode.start << PAGE_SHIFT);
ret = -EINVAL; goto err_bo;
}
WARN_ON(bomapping->mmu->as != as);
for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) { /* Can happen if the last fault only partially filled this * section of the pages array before failing. In that case * we skip already filled pages.
*/ if (pages[i]) continue;
pages[i] = shmem_read_mapping_page(mapping, i); if (IS_ERR(pages[i])) {
ret = PTR_ERR(pages[i]);
pages[i] = NULL; goto err_unlock;
}
}
/* Page fault only */
ret = -1; if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);
if (ret) { /* terminal fault, print info about the fault */
dev_err(pfdev->dev, "Unhandled Page fault in AS%d at VA 0x%016llX\n" "Reason: %s\n" "raw fault status: 0x%X\n" "decoded fault status: %s\n" "exception type 0x%X: %s\n" "access type 0x%X: %s\n" "source id 0x%X\n",
as, addr, "TODO",
fault_status,
(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
exception_type, panfrost_exception_name(exception_type),
access_type, access_type_name(pfdev, fault_status),
source_id);
spin_lock(&pfdev->as_lock); /* Ignore MMU interrupts on this AS until it's been * re-enabled.
*/
pfdev->as_faulty_mask |= mask;
/* Disable the MMU to kill jobs on this AS. */
panfrost_mmu_disable(pfdev, as);
spin_unlock(&pfdev->as_lock);
}
status &= ~mask;
/* If we received new MMU interrupts, process them before returning. */ if (!status)
status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
}
/* Enable interrupts only if we're not about to get suspended */ if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
spin_lock(&pfdev->as_lock);
mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
spin_unlock(&pfdev->as_lock);
}
return IRQ_HANDLED;
};
int panfrost_mmu_init(struct panfrost_device *pfdev)
{ int err;
pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu"); if (pfdev->mmu_irq < 0) return pfdev->mmu_irq;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.