/*
 * NOTE(review): interior fragment of an allocate-with-eviction retry loop
 * (the enclosing function and loop header are outside this chunk).
 * 'scan', 'list', 'found', 'mode', 'free', 'm' and 'n' are declared in
 * the unseen surrounding scope — presumably a drm_mm_scan, a list_head,
 * a bool, an insert mode and mapping cursors; confirm against the full
 * function.
 *
 * First try a plain insertion anywhere in the address space.
 */
ret = drm_mm_insert_node_in_range(&context->mm, node,
				  size, 0, 0, 0, U64_MAX, mode);
/* Anything other than "address space full" is a final result. */
if (ret != -ENOSPC)
	break;

/* Try to retire some entries: run an eviction scan for 'size' bytes. */
drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);
found = 0;
INIT_LIST_HEAD(&list);
list_for_each_entry(free, &context->mappings, mmu_node) {
	/* If this vram node has not been used, skip this. */
	if (!free->vram_node.mm)
		continue;

	/*
	 * If the iova is pinned, then it's in-use,
	 * so we must keep its mapping.
	 */
	if (free->use)
		continue;

	/*
	 * Track every candidate we feed to the scanner; the scan
	 * reports success as soon as enough contiguous space would
	 * be freed by evicting the blocks added so far.
	 */
	list_add(&free->scan_node, &list);
	if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
		found = true;
		break;
	}
}

if (!found) {
	/* Nothing found, clean up and fail */
	list_for_each_entry_safe(m, n, &list, scan_node)
		BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
	break;
}

/*
 * drm_mm does not allow any other operations while
 * scanning, so we have to remove all blocks first.
 * If drm_mm_scan_remove_block() returns false, we
 * can leave the block pinned.
 */
list_for_each_entry_safe(m, n, &list, scan_node)
	if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
		list_del_init(&m->scan_node);

/*
 * Unmap the blocks which need to be reaped from the MMU.
 * Clear the mmu pointer to prevent the mapping_get finding
 * this mapping.
 */
list_for_each_entry_safe(m, n, &list, scan_node) {
	etnaviv_iommu_reap_mapping(m);
	list_del_init(&m->scan_node);
}

/* Retries after an eviction pass use the evicting insert mode. */
mode = DRM_MM_INSERT_EVICT;

/*
 * We removed enough mappings so that the new allocation will
 * succeed, retry the allocation one more time.
 */
}
/*
 * NOTE(review): tail fragment of an exact-address insertion helper; the
 * function signature and local declarations ('scan_list', 'scan_node',
 * 'm', 'n', 'va', 'size') are outside this chunk.
 *
 * First attempt: insert the node exactly at [va, va + size).
 */
ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
				  va + size, DRM_MM_INSERT_LOWEST);
if (ret != -ENOSPC)
	return ret;

/*
 * When we can't insert the node, due to an existing mapping blocking
 * the address space, there are two possible reasons:
 * 1. Userspace genuinely messed up and tried to reuse address space
 * before the last job using this VMA has finished executing.
 * 2. The existing buffer mappings are idle, but the buffers are not
 * destroyed yet (likely due to being referenced by another context) in
 * which case the mappings will not be cleaned up and we must reap them
 * here to make space for the new mapping.
 */
drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
	m = container_of(scan_node, struct etnaviv_vram_mapping,
			 vram_node);
	/* Case 1: a still-in-use mapping blocks the range — hard fail. */
	if (m->use)
		return -ENOSPC;
	list_add(&m->scan_node, &scan_list);
}

/* Case 2: every blocker is idle — unmap and release them. */
list_for_each_entry_safe(m, n, &scan_list, scan_node) {
	etnaviv_iommu_reap_mapping(m);
	list_del_init(&m->scan_node);
}

/* The range is now clear; this second attempt should succeed. */
return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
				   va + size, DRM_MM_INSERT_LOWEST);
}
/*
 * NOTE(review): fragment of a GEM-object mapping path; 'ret', 'node',
 * 'etnaviv_obj' and the 'unlock' label live in the unseen surroundings.
 * A non-zero caller-supplied 'va' requests exact placement at that
 * address; otherwise any free range of the object's size is acceptable.
 */
if (va)
	ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
else
	ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
if (ret < 0)
	goto unlock;
/*
 * NOTE(review): fragment of an unmap path in a void function (note the
 * bare 'return'); 'mapping' and 'context' are defined outside this
 * chunk, and context->lock is held here — presumably released by the
 * unseen tail as well.
 */
/* Bail if the mapping has been reaped by another thread */
if (!mapping->context) {
	mutex_unlock(&context->lock);
	return;
}

/* If the vram node is on the mm, unmap and remove the node */
if (mapping->vram_node.mm == &context->mm)
	etnaviv_iommu_remove_mapping(context, mapping);
/*
 * NOTE(review): fragment of a suballocation-region mapping helper;
 * 'paddr', 'memory_base', 'size' and 'mapping' come from the unseen
 * signature, and the else-branch opened below is not closed within this
 * chunk.  context->lock is held on entry.
 *
 * Fast path: the region is already mapped — just take another reference.
 */
if (mapping->use > 0) {
	mapping->use++;
	mutex_unlock(&context->lock);
	return 0;
}

/*
 * For MMUv1 we don't add the suballoc region to the pagetables, as
 * those GPUs can only work with cmdbufs accessed through the linear
 * window. Instead we manufacture a mapping to make it look uniform
 * to the upper layers.
 */
if (context->global->version == ETNAVIV_IOMMU_V1) {
	mapping->iova = paddr - memory_base;
} else {
	struct drm_mm_node *node = &mapping->vram_node;
	int ret;

	/* Reserve an iova range of 'size' bytes for the region. */
	ret = etnaviv_iommu_find_iova(context, node, size);
	if (ret < 0) {
		mutex_unlock(&context->lock);
		return ret;
	}

	mapping->iova = node->start;
	/* Read-only is sufficient: the GPU only fetches cmdbufs here. */
	ret = etnaviv_context_map(context, node->start, paddr, size,
				  ETNAVIV_PROT_READ);
	if (ret < 0) {
		/* Undo the address-space reservation on map failure. */
		drm_mm_remove_node(node);
		mutex_unlock(&context->lock);
		return ret;
	}
/*
 * NOTE(review): the text below is extraction residue — a German website
 * disclaimer unrelated to this driver source — fenced in a comment so it
 * no longer reads as code.  English translation: "The information on
 * this website was compiled carefully to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * provided information is guaranteed.  Note: the coloured syntax
 * highlighting and the measurement are still experimental."
 *
 * Original text:
 *   Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *   sorgfaeltig zusammengestellt. Es wird jedoch weder Vollstaendigkeit,
 *   noch Richtigkeit, noch Qualitaet der bereit gestellten Informationen
 *   zugesichert.
 *   Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 *   experimentell.
 */