/* Flags for swiotlb initialization (passed to swiotlb_init_remap() etc.) */
#define SWIOTLB_VERBOSE	(1 << 0) /* verbose initialization */
#define SWIOTLB_FORCE	(1 << 1) /* force bounce buffering */
#define SWIOTLB_ANY	(1 << 2) /* allow any memory for the buffer */

/*
 * Maximum allowable number of contiguous slabs to map,
 * must be a power of 2.  What is the appropriate value ?
 * The complexity of {map,unmap}_single is linearly dependent on this value.
 */
#define IO_TLB_SEGSIZE	128

/*
 * log of the size of each IO TLB slab.  The number of slabs is command line
 * controllable.
 */
#define IO_TLB_SHIFT 11
#define IO_TLB_SIZE (1 << IO_TLB_SHIFT)

/* default to 64MB */
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
/* Public swiotlb setup interface; bodies live in kernel/dma/swiotlb.c. */
unsigned long swiotlb_size_or_default(void);
void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
	int (*remap)(void *tlb, unsigned long nslabs));
int swiotlb_init_late(size_t size, gfp_t gfp_mask,
	int (*remap)(void *tlb, unsigned long nslabs));
extern void __init swiotlb_update_mem_attributes(void);
#ifdef CONFIG_SWIOTLB
/**
 * struct io_tlb_pool - IO TLB memory pool descriptor
 * @start:	The start address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @end:	The end address of the swiotlb memory pool. Used to do a quick
 *		range check to see if the memory was in fact allocated by this
 *		API.
 * @vaddr:	The vaddr of the swiotlb memory pool. The swiotlb memory pool
 *		may be remapped in the memory encrypted case and store virtual
 *		address for bounce buffer operation.
 * @nslabs:	The number of IO TLB slots between @start and @end. For the
 *		default swiotlb, this can be adjusted with a boot parameter,
 *		see setup_io_tlb_npages().
 * @late_alloc:	%true if allocated using the page allocator.
 * @nareas:	Number of areas in the pool.
 * @area_nslabs: Number of slots in each area.
 * @areas:	Array of memory area descriptors.
 * @slots:	Array of slot descriptors.
 * @node:	Member of the IO TLB memory pool list.
 * @rcu:	RCU head for swiotlb_dyn_free().
 * @transient:  %true if transient memory pool.
 */
struct io_tlb_pool {
	phys_addr_t start;
	phys_addr_t end;
	void *vaddr;
	unsigned long nslabs;
	bool late_alloc;
	unsigned int nareas;
	unsigned int area_nslabs;
	struct io_tlb_area *areas;
	struct io_tlb_slot *slots;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	struct list_head node;
	struct rcu_head rcu;
	bool transient;
#endif
};
/**
 * struct io_tlb_mem - Software IO TLB allocator
 * @defpool:	Default (initial) IO TLB memory pool descriptor.
 * @pool:	IO TLB memory pool descriptor (if not dynamic).
 * @nslabs:	Total number of IO TLB slabs in all pools.
 * @debugfs:	The dentry to debugfs.
 * @force_bounce: %true if swiotlb bouncing is forced
 * @for_alloc:  %true if the pool is used for memory allocation
 * @can_grow:	%true if more pools can be allocated dynamically.
 * @phys_limit:	Maximum allowed physical address.
 * @lock:	Lock to synchronize changes to the list.
 * @pools:	List of IO TLB memory pool descriptors (if dynamic).
 * @dyn_alloc:	Dynamic IO TLB pool allocation work.
 * @total_used:	The total number of slots in the pool that are currently used
 *		across all areas. Used only for calculating used_hiwater in
 *		debugfs.
 * @used_hiwater: The high water mark for total_used.  Used only for reporting
 *		in debugfs.
 * @transient_nslabs: The total number of slots in all transient pools that
 *		are currently used across all areas.
 */
struct io_tlb_mem {
	struct io_tlb_pool defpool;
	unsigned long nslabs;
	struct dentry *debugfs;
	bool force_bounce;
	bool for_alloc;
#ifdef CONFIG_SWIOTLB_DYNAMIC
	bool can_grow;
	u64 phys_limit;
	spinlock_t lock;
	struct list_head pools;
	struct work_struct dyn_alloc;
#endif
#ifdef CONFIG_DEBUG_FS
	atomic_long_t total_used;
	atomic_long_t used_hiwater;
	atomic_long_t transient_nslabs;
#endif
};
/** * swiotlb_find_pool() - find swiotlb pool to which a physical address belongs * @dev: Device which has mapped the buffer. * @paddr: Physical address within the DMA buffer. * * Find the swiotlb pool that @paddr points into. * * Return: * * pool address if @paddr points into a bounce buffer * * NULL if @paddr does not point into a bounce buffer. As such, this function * can be used to determine if @paddr denotes a swiotlb bounce buffer.
*/ staticinlinestruct io_tlb_pool *swiotlb_find_pool(struct device *dev,
phys_addr_t paddr)
{ struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
if (!mem) return NULL;
#ifdef CONFIG_SWIOTLB_DYNAMIC /* * All SWIOTLB buffer addresses must have been returned by * swiotlb_tbl_map_single() and passed to a device driver. * If a SWIOTLB address is checked on another CPU, then it was * presumably loaded by the device driver from an unspecified private * data structure. Make sure that this load is ordered before reading * dev->dma_uses_io_tlb here and mem->pools in __swiotlb_find_pool(). * * This barrier pairs with smp_mb() in swiotlb_find_slots().
*/
smp_rmb(); if (READ_ONCE(dev->dma_uses_io_tlb)) return __swiotlb_find_pool(dev, paddr); #else if (paddr >= mem->defpool.start && paddr < mem->defpool.end) return &mem->defpool; #endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.