// SPDX-License-Identifier: GPL-2.0
/*
 * IOMMU mmap management and range allocation functions.
 * Based almost entirely upon the powerpc iommu allocator.
 */
/* * Initialize iommu_pool entries for the iommu_map_table. `num_entries' * is the number of table entries. If `large_pool' is set to true, * the top 1/4 of the table will be set aside for pool allocations * of more than iommu_large_alloc pages.
*/ void iommu_tbl_pool_init(struct iommu_map_table *iommu, unsignedlong num_entries,
u32 table_shift, void (*lazy_flush)(struct iommu_map_table *), bool large_pool, u32 npools, bool skip_span_boundary_check)
{ unsignedint start, i; struct iommu_pool *p = &(iommu->large_pool);
/* The case below can happen if we have a small segment appended * to a large, or when the previous alloc was at the very end of * the available space. If so, go back to the beginning. If a * flush is needed, it will get done based on the return value * from iommu_area_alloc() below.
*/ if (start >= limit)
start = pool->start;
shift = iommu->table_map_base >> iommu->table_shift; if (limit + shift > mask) {
limit = mask - shift + 1; /* If we're constrained on address range, first try * at the masked hint to avoid O(n) search complexity, * but on second pass, start at 0 in pool 0.
*/ if ((start & mask) >= limit || pass > 0) {
spin_unlock(&(pool->lock));
pool = &(iommu->pools[0]);
spin_lock(&(pool->lock));
start = pool->start;
} else {
start &= mask;
}
}
/* * if the skip_span_boundary_check had been set during init, we set * things up so that iommu_is_span_boundary() merely checks if the * (index + npages) < num_tsb_entries
*/ if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
shift = 0;
boundary_size = iommu->poolsize * iommu->nr_pools;
} else {
boundary_size = dma_get_seg_boundary_nr_pages(dev,
iommu->table_shift);
}
n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
boundary_size, align_mask); if (n == -1) { if (likely(pass == 0)) { /* First failure, rescan from the beginning. */
pool->hint = pool->start;
set_flush(iommu);
pass++; goto again;
} elseif (!largealloc && pass <= iommu->nr_pools) {
spin_unlock(&(pool->lock));
pool_nr = (pool_nr + 1) & (iommu->nr_pools - 1);
pool = &(iommu->pools[pool_nr]);
spin_lock(&(pool->lock));
pool->hint = pool->start;
set_flush(iommu);
pass++; goto again;
} else { /* give up */
n = IOMMU_ERROR_CODE; goto bail;
}
} if (iommu->lazy_flush &&
(n < pool->hint || need_flush(iommu))) {
clear_flush(iommu);
iommu->lazy_flush(iommu);
}
end = n + npages;
pool->hint = end;
/* Update handle for SG allocations */ if (handle)
*handle = end;
bail:
spin_unlock_irqrestore(&(pool->lock), flags);
/* The large pool is the last pool at the top of the table */ if (large_pool && entry >= largepool_start) {
p = &tbl->large_pool;
} else { unsignedint pool_nr = entry / tbl->poolsize;
BUG_ON(pool_nr >= tbl->nr_pools);
p = &tbl->pools[pool_nr];
} return p;
}
/* Caller supplies the index of the entry into the iommu map table
 * itself when the mapping from dma_addr to the entry is not the
 * default addr->entry mapping below.
 *
 * NOTE(review): the tail of this function (lock, bitmap_clear, unlock)
 * was lost to extraction junk; restored per upstream lib/iommu-common.c.
 */
void iommu_tbl_range_free(struct iommu_map_table *iommu, u64 dma_addr,
			  unsigned long npages, unsigned long entry)
{
	struct iommu_pool *pool;
	unsigned long flags;
	unsigned long shift = iommu->table_shift;

	if (entry == IOMMU_ERROR_CODE) /* use default addr->entry mapping */
		entry = (dma_addr - iommu->table_map_base) >> shift;
	pool = get_pool(iommu, entry);

	/* Freeing is just clearing the bitmap under the owning pool's lock;
	 * any required IOTLB flush is deferred (lazy_flush) to allocation.
	 */
	spin_lock_irqsave(&(pool->lock), flags);
	bitmap_clear(iommu->map, entry, npages);
	spin_unlock_irqrestore(&(pool->lock), flags);
}
EXPORT_SYMBOL(iommu_tbl_range_free);