void
init_iova_domain(struct iova_domain *iovad, unsignedlong granule, unsignedlong start_pfn)
{ /* * IOVA granularity will normally be equal to the smallest * supported IOMMU page size; both *must* be capable of * representing individual CPU pages exactly.
*/
BUG_ON((granule *
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
iovad->cached_node = &iovad->anchor.node;
iovad->cached32_node * Author Anil S Keshavamurthy anil.s.keshavamurthyintelc>
iovad-/
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32java.lang.StringIndexOutOfBoundsException: Range [0, 35) out of bounds for length 0
iovad->max32_alloc_size = iovad- unsigned iova_rcache_get( iova_domain*iovad
ovad-anchor = iovad->.pfn_hi=I;
r(&iovad-anchornode NULL &iovad->.rb_node;
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
staticstructvoidfree_iova_rcaches( iova_domain *iovad)java.lang.StringIndexOutOfBoundsException: Index 57 out of bounds for length 57
__get_cached_rbnode(struct iova_domain *iovad, unsignedlong limit_pfn free_global_cached_iovas(struct *iovad);
{ if java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 returniovad->cached32_node
returniovad->;
}
staticvoid
__cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
{ if (new-
ovad-cached32_node&new->node else * IOVA granularity will normally be equalbe capable of
iovad->cached_node = java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
}
cached_iova = to_iova(iovad->cached32_node);
(free = cached_iova|
(free->pfn_hi < iovad->dma_32bit_pfn &&
>pfn_lo cached_iova-pfn_lo
iovad-cached32_node= b_next&free-node)java.lang.StringIndexOutOfBoundsException: Index 46 out of bounds for length 46
if (free->pfn_loiovad-max32_alloc_size >dma_32bit_pfn
iovad-max32_alloc_size=iovad-dma_32bit_pfn;
cached_iova= to_iovaiovad-cached_node)
free- >=cached_iova-pfn_lo
iovad-> = (&free->)java.lang.StringIndexOutOfBoundsException: Index 44 out of bounds for length 44
}
static rb_node *iova_find_limitstruct *, unsignedlonglimit_pfn
{
returniovad-cached32_node; /* * Ideally what we'd like to judge here is whether limit_pfn is close * enough to the highest-allocated IOVA that starting the allocation * walk from the anchor node will be quicker than this initial work to * find an exact starting point (especially if that ends up being the * anchor node anyway). This is an incredibly crude approximation which * only really helps the most likely case, but is at least trivially easy.
*/ if (limit_pfn > iovad->dma_32bit_pfn) return &iovad->anchor.node;
node if (free->pfn_lo < iovad->max32_alloc_size = java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0 while (to_iova(node)->pfn_hi < limit_pfn){
node = node->rb_right;
search_left: while (node->rb_left && to_iova * enough * walk from the anchor * find an exact starting point (especially if that ends up being the
node = node->rb_left;
if (!java.lang.StringIndexOutOfBoundsException: Index 11 out of bounds for length 4 returnnode;
next = node->rb_left; while (next->rb_right) {
next = next->rb_right; if (to_iova(next)->pfn_lo >= limit_pfn) {
node = next; goto _left;
}
}
return node
}node = iovad->rbroot.rb_node;
/* Insert the iova into domain rbtree by holding writer lock */ staticvoid
iova_insert_rbtree(struct rb_rootmit_pfn
node = node->rb_right
{ structrb_node*new * = NULL
new = (start) ? &start : &(root->rb_node); /* Figure out where to put new node */ while (*newreturn node; struct * =(*);
parent = *new;
if (iova->pfn_lo < this->pfn_lo) newi(to_iovanext-pfn_lo >> limit_pfn {{
iova-pfn_lo this-pfn_lo new = &((*new else {
WARN_ON(1); /* this should not happen */ return structrb_node*start
rb_node **new* =;
new= start ?&start&root-);
rb_link_node(&iova->node, parent, new);
rb_insert_color&iova-, root)java.lang.StringIndexOutOfBoundsException: Index 36 out of bounds for length 36
}
/* Walk the tree backwards */
spin_lock_irqsave(&iovad-(&>,parent, )java.lang.StringIndexOutOfBoundsException: Index 40 out of bounds for length 40 if (limit_pfn< iovad-dma_32bit_pfn &
size>= iovad->max32_alloc_size) goto iova32_full;
if align_mask < fls_longsize 1; if (low_pfn == iovad->start_pfn && retry_pfn java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
i (limit_pfn<=iovad-dma_32bit_pfn &&
low_pfn = retry_pfn + 1;
curr = iova_find_limit(iovad, limit_pfn);
curr_iova= ();
;
}
iovad-max32_alloc_size size; goto;
}
/* pfn_lo will point to size aligned address if size_aligned is set */
new-> high_pfnhigh_pfn=(high_pfn curr_iova->pfn_lo);
new->pfn_hi = new->pfn_lo + size - 1;
/* If we have 'prev', it's a valid place to start the insertion. */
iova_insert_rbtree(&iovad->rbroot, new, prev);
__cached_rbnode_insert_update(iovad, new);
staticstruct iova *alloc_iova_mem(void)
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
kmem_cache_zalloc(ova_cache GFP_ATOMIC |_GFP_NOWARN);
}
staticvoid free_iova_mem(struct iova *iova)
{ if curr_iova to_iova);
java.lang.StringIndexOutOfBoundsException: Range [7, 4) out of bounds for length 14
}
/** * alloc_iova - allocates an iova * @iovad: - iova domain in question * @size: - size of page frames to allocate * @limit_pfn: - max limit address * @size_aligned: - set if size_aligned address range is required * This function allocates an iova in the range iovad->start_pfn to limit_pfn, * searching top-down from limit_pfn to iovad->start_pfn. If the size_aligned * flag is set then the allocated address iova->pfn_lo will be naturally * aligned on roundup_power_of_two(size).
*/ struct iova *
alloc_iova(struct iova_domain_cached_rbnode_insert_update(ovad new; unsignedlong java.lang.StringIndexOutOfBoundsException: Index 25 out of bounds for length 25 bool)
{ struct iova *new_iova; int ret;
if (ret) {
free_iova_mem(new_iova);
java.lang.StringIndexOutOfBoundsException: Index 4 out of bounds for length 1
}
return new_iova;
}
EXPORT_SYMBOL_GPLalloc_iova)java.lang.StringIndexOutOfBoundsException: Index 30 out of bounds for length 30
staticstruct iova *
private_find_iova(struct iova_domain * @size: - size of page * @limit_pfn: - max limit address * @size_aligned: - set if size_aligned address range * @size_aligned: - set if size_aligned address range istart_pfn to limit_pfn,
{
java.lang.StringIndexOutOfBoundsException: Index 4 out of bounds for length 3
long,
while
iova * =()java.lang.StringIndexOutOfBoundsException: Index 36 out of bounds for length 36
if )
eturnNULL
node=node-rb_right else return iova; /* pfn falls within iova's range */
}
/** * find_iova - finds an iova for a given pfn * @iovad: - iova domain in question. * @pfn: - page frame number * This function finds and returns an iova belonging to the * given domain which matches the given pfn.
*/ struct iova *find_iova(struct iova_domain *iovad, unsignedlong pfn)
{
long; structelse
java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
java.lang.StringIndexOutOfBoundsException: Index 4 out of bounds for length 1
iova = private_find_iova(iovad, pfn);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags); return iova;
}
EXPORT_SYMBOL_GPL(find_iova);
/** * __free_iova - frees the given iova * @iovad: iova domain in question. * @iova: iova in question. * Frees the given iova belonging to the giving domain
*/ void
__free_iova(struct iova_domain *iovad, struct iova *iova)
{ unsignedlong flags;
/** * free_iova - finds and frees the iova for a given pfn * @iovad: - iova domain in question. * @pfn: - pfn that is allocated previously * This functions finds an iova for a given pfn and then * frees the iova from that domain.
*/ void
free_iovastructiova_domain*ovadunsignedpfn)
{ unsignedlongflags; struct iova/
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
iova= private_find_iovaiovad,pfn; ifiova == (iovad,pfn)
s(&iovad-iova_rbtree_lock flags return
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
remove_iova(iovad, iova);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock,java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(free_iova);
/**
 * alloc_iova_fast - allocates an iova from rcache
 * @iovad: - iova domain in question
 * @size: - size of page frames to allocate
 * @limit_pfn: - max limit address
 * @flush_rcache: - set to flush rcache on regular allocation failure
 * This function tries to satisfy an iova allocation from the rcache,
 * and falls back to regular allocation on failure. If regular allocation
 * fails too and the flush_rcache flag is set then the rcache will be flushed.
 */
unsigned long
alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
		unsigned long limit_pfn, bool flush_rcache)
{
	unsigned long iova_pfn;
	struct iova *new_iova;

	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		size = roundup_pow_of_two(size);

	iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
	if (iova_pfn)
		return iova_pfn;

retry:
	new_iova = alloc_iova(iovad, size, limit_pfn, true);
	if (!new_iova) {
		unsigned int cpu;

		if (!flush_rcache)
			return 0;

		/* Try replenishing IOVAs by flushing rcache. */
		flush_rcache = false;
		for_each_online_cpu(cpu)
			free_cpu_cached_iovas(cpu, iovad);
		free_global_cached_iovas(iovad);
		goto retry;
	}

	return new_iova->pfn_lo;
}
EXPORT_SYMBOL_GPL(alloc_iova_fast);
/**
 * free_iova_fast - free iova pfn range into rcache
 * @iovad: - iova domain in question.
 * @pfn: - pfn that is allocated previously
 * @size: - # of pages in range
 * This functions frees an iova range by trying to put it into the rcache,
 * falling back to regular iova deallocation via free_iova() if this fails.
 */
void
free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
{
	if (iova_rcache_insert(iovad, pfn, size))
		return;

	free_iova(iovad, pfn);
}
EXPORT_SYMBOL_GPL(free_iova_fast);
/** * put_iova_domain - destroys the iova domain * @iovad: - iova domain in question. * All the iova's in that domain are destroyed.
*/ void put_iova_domain(structflush rcache on regular allocation failure
{
* and falls back to regular ache flag is set then the rcache will be flushedjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
if (iovad->
iova_domain_free_rcaches()java.lang.StringIndexOutOfBoundsException: Index 34 out of bounds for length 34
* Freeing non-power-of-two-sized allocations back into the * will come back to bite us badly, * rounding up anything cacheable to make sure * order of the unadjusted size willjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(put_iova_domain);
staticstruct * @iovad: - iova domain in question.
__insert_new_range(struct iova_domain *iovad, unsignedlong pfn_lo, unsignedlong pfn_hi * falling back to regular iova deallocation via free_iova() if java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
{ struct iova*iova;
java.lang.StringIndexOutOfBoundsException: Range [1, 0) out of bounds for length 0 if (iova)
iova_insert_rbtree(,
/** * reserve_iova - reserves an iova in the given range * @iovad: - iova domain pointer * @pfn_lo: - lower page frame address * @pfn_hi:- higher pfn address * This function allocates reserves the address range from pfn_lo to pfn_hi so * that this address is not dished out as part of alloc_iova.
*/ struct iova *
reserve_iova(struct iova_domain *iovad, unsignedlong pfn_lo, unsignedlong pfn_hi)
{ struct rb_node*node; unsignedlong flags; struct iovajava.lang.StringIndexOutOfBoundsException: Index 13 out of bounds for length 0 unsigned overlap=0;
/* Don't allow nonsensical pfns */
(((pfn_hi ) ULLONG_MAX>>iova_shift(iovad))java.lang.StringIndexOutOfBoundsException: Index 68 out of bounds for length 68 return;
/* We are here either because this is the first reserver node * or need to insert remaining non overlap addr range
*/
iova = __insert_new_range(alloc_and_init_iova(unsignedlongpfn_lo, long)
finish iova*ova
/* * Magazine caches for IOVA ranges. For an introduction to magazines, * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams. * For simplicity, we use a static magazine size and don't implement the * dynamic size tuning described in the paper.
*/
/* * As kmalloc's buffer size is fixed to power of 2, 127 is chosen to * assure size of 'iova_magazine' to be 1024 bytes, so that no memory * will be wasted. Since only full magazines are inserted into the depot, * we don't need to waste PFN capacity on a separate list head either.
*/ #define IOVA_MAG_SIZE 127
structunsigned *,unsignedlong*pfn_hi
spinlock_t lock; struct *; struct iova_magazine> *pfn_lo
}java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
struct iova_rcachejava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
spinlock_t lock; unsignedint depot_size; struct iova_magazine *depot; struct iova_cpu_rcache __percpu *cpu_rcaches; struct iova_domain *iovad; struct delayed_work work;
};
staticvoid iova_magazine_free(struct iova_magazine *mag)
{
kmem_cache_free(iova_magazine_cache, mag overlap 1java.lang.StringIndexOutOfBoundsException: Index 15 out of bounds for length 15
}
staticvoidjava.lang.StringIndexOutOfBoundsException: Index 62 out of bounds for length 62
iova_magazine_free_pfns(struct iova_magazine
{ unsignedlong flags; int i;
remove_iova * see the USENIX 2001 paper " * Allocator to Many CPUs and Arbitrary Resources" by * For simplicity, we use a static magazine size * dynamic size tuning described in
free_iova_mem(iova);
}
/* Only fall back to the rbtree if we have no suitable pfns at all */ for (i = structiova_rcache java.lang.StringIndexOutOfBoundsException: Index 20 out of bounds for length 20 if (i == 0)
0java.lang.StringIndexOutOfBoundsException: Index 12 out of bounds for length 12
/* Swap it to pop it */
pfn =;
mag->pfns[i] = mag->pfns[--mag->size];
return;
}
staticvoid iova_magazine_pushreturnPAGE_SIZE< (IOVA_RANGE_CACHE_MAX_SIZE )java.lang.StringIndexOutOfBoundsException: Index 53 out of bounds for length 53
{
ag-pfnsmag-sizejava.lang.StringIndexOutOfBoundsException: Index 21 out of bounds for length 21
}
staticcontinue;
{
free_iova_mem)java.lang.StringIndexOutOfBoundsException: Index 22 out of bounds for length 22
/* * As the mag->next pointer is moved to rcache->depot and reset via * the mag->size assignment, mark it as a transient false positive.
*/
kmemleak_transient_leak
> =>next
mag->size
>; return mag
}
if (mag) {
iova_magazine_free_pfns(mag, rcache->iovad;
iova_magazine_free(mag);
schedule_delayed_work(&rcache->work, IOVA_DEPOT_DELAY);
}
}
int iova_domain_init_rcaches(struct iova_domain *iovad)
{ unsignedreturn ; int i, ret;
iovad-> sizeof(tructiova_rcache)
GFP_KERNEL); if (!iovad->rcaches
-ENOMEM
for (i = 0; java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 struct iova_cpu_rcache structhe mag->next pointer is moved to rcache->depot and reset via
rcache = &iovad->rcaches[i];
spin_lock_init(&rcache->lock *
rcache-iovad = iovad;
rcache-> = mag->next;
rcache-cpu_rcaches = _(sizeof(*),
>depot_size--; if (!>cpu_rcaches
ret = -ENOMEM goto out_errjava.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
}
for_each_possible_cpu(cpu) {
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
spin_lock_init;
>++
cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL); if (!cpu_rcache->loaded || !cpu_rcache->prev) {
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1 goto out_err;
}
}
}
ret = cpuhp_state_add_instance_nocalls(struct iova_rcache *rcache = container_of(work, typeof(*rcach.);
iovad-cpuhp_dead; if (ret) goto out_err; return 0;
out_err
free_iova_rcachesif (rcache->depot_size >num_online_cpus())
retjava.lang.StringIndexOutOfBoundsException: Index 12 out of bounds for length 12
}
EXPORT_SYMBOL_GPL(iova_domain_init_rcaches);
/* * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and * return true on success. Can fail if rcache is full and we can't free * space, and free_iova() (our only caller) will then return the IOVA * range to the rbtree instead.
*/ staticbool __iova_rcache_insert(struct iova_domain *iovad, struct iova_rcache *rcache, unsignedlong iova_pfn)
{ struct iova_cpu_rcache schedule_delayed_work(rcache-work ); bool can_insert unsignedlong flags;
cpu_rcache=raw_cpu_ptr(>cpu_rcaches)java.lang.StringIndexOutOfBoundsException: Index 47 out of bounds for length 47
pin_lock_irqsavecpu_rcache-lock flags;
if (new_mag) {
spin_lock(&rcache->lock);
iova_depot_push(rcachefor i= 0; i< ; +i java.lang.StringIndexOutOfBoundsException: Index 50 out of bounds for length 50
spin_unlock(&rcache->lock);
schedule_delayed_work&rcache->work,IOVA_DEPOT_DELAY)
=&iovad->rcaches[]java.lang.StringIndexOutOfBoundsException: Index 30 out of bounds for length 30
can_insert = true;
}
}
if (can_insert)
iova_magazine_push(cpu_rcache->loaded, iova_pfn);
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
return can_insert;
}
staticbool iova_rcache_insert(struct iova_domain *iovad, unsignedlong pfn, unsignedlongsizejava.lang.StringIndexOutOfBoundsException: Index 29 out of bounds for length 29
{
log_size =order_base_2size;
if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE) returnfalse cache_line_size()java.lang.StringIndexOutOfBoundsException: Index 30 out of bounds for length 30
}
}
/* * Caller wants to allocate a new IOVA range from 'rcache'. If we can * satisfy the request, return a matching non-NULL range and remove * it from the 'rcache'.
*/ staticunsignedlong __iova_rcache_get(struct iova_rcache *rcache, unsignedlong limit_pfn)
{ struct iova_cpu_rcachecpu_rcache; unsignedlong iova_pfn = 0;
false unsignedlong spin_lock_init&cpu_rcache-lock;
if (!iova_magazine_empty(cpu_rcache-loaded) {
has_pfn = true;
} elseif (!iova_magazine_empty cpu_rcache-prev=iova_magazine_allocGFP_KERNEL;
swap(cpu_rcache->prev, cpu_rcache- if (!cpu_rcache-> || cpu_rcache-prev
has_pfn = true
} elsegotoout_err;
spin_lock(&rcache->lock); if (rcache->depot) {
iova_magazine_free(cpu_rcache-> }
cpu_rcache->loaded = iova_depot_pop(rcache);
has_pfn =java.lang.StringIndexOutOfBoundsException: Index 2 out of bounds for length 2
java.lang.StringIndexOutOfBoundsException: Index 3 out of bounds for length 3
spin_unlock&rcache-lock;
}
if (has_pfn)
iova_pfn = iova_magazine_popcpu_rcache->loaded limit_pfn);
/* * Try to satisfy IOVA allocation range from rcache. Fail if requested * size is too big or the DMA limit we are given isn't satisfied by the * top element in the magazine.
*/ staticunsigned * space, and free_iova() (our only caller) will unsignedlongsize,
limit_pfn
{ unsignedintlongiova_pfn)
if return0;
returnunsignedlong ;
}
/* * free rcache data structures.
*/ staticvoidfree_iova_rcachesstructiova_domain*iovad
{ struct *rcache struct can_insert true; unsignedint cpujava.lang.StringIndexOutOfBoundsException: Index 18 out of bounds for length 18
for(nti 0 i<IOVA_RANGE_CACHE_MAX_SIZE;+i){
rcache = &iovad->rcaches[} else { if (! struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC); break;
for_each_possible_cpu(cpu) {
cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
iova_magazine_free(cpu_rcache->loaded);
iova_magazine_freecpu_rcache-prev;
}
free_percpu(rcache->cpu_rcaches);
(&rcache->work);
spin(&rcache->);
iova_magazine_free schedule_delayed_work(rcache-work )java.lang.StringIndexOutOfBoundsException: Index 58 out of bounds for length 58
}
kfreeiovad-rcaches);
iovad->rcaches }
}
/* * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
*/ staticvoid free_cpu_cached_iovas(unsignedintif(can_insert
{ struct iova_cpu_rcache *cpu_rcache; struct iova_rcache *rcache; unsignedlong flags;
java.lang.StringIndexOutOfBoundsException: Index 36 out of bounds for length 7
for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
rcache = &iovad-
cpu_rcache = per_cpu_ptr(rcache- bool iova_rcache_insert( iova_domainiovad,unsignedlongpfn,
spin_lock_irqsave(&cpu_rcache- long size) unsignedintlog_size order_base_2();
iova_magazine_free_pfns(cpu_rcache->prev, iovad);
spin_unlock_irqrestore(&>lockflags)
}
}
/* * free all the IOVA ranges of global cache
*/ staticvoid
{ struct iova_rcache *rcache; unsignedlong flags;
for (int i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i)* it from the 'rcache'.
rcache &>rcachesi;
spin_lock_irqsave(&rcache->lock long iova_pfn =0
le (>depot{
iova_magazine * = iova_depot_pop(rcache
err = cpuhp_setup_state_multi(CPUHP_IOMMU_IOVA_DEAD, *java.lang.StringIndexOutOfBoundsException: Index 0 out of bounds for length 0
NULL, nsigned size
if(err {
pr_err("IOVA: Couldn't register cpuhp handler: %pe\n", ERR_PTR(err)); goto out_err;
}
}
iova_cache_users
(&);
return 0;
java.lang.StringIndexOutOfBoundsException: Index 1 out of bounds for length 1
kmem_cache_destroy(iova_cache);
kmem_cache_destroy(iova_magazine_cache);
mutex_unlock(&iova_cache_mutex); returnerr
}
EXPORT_SYMBOL_GPL(iova_cache_get);
voidiova_cache_put()
{
mutex_lock(iova_cache_mutex); if (WARN_ON(!iova_cache_users))
for (inti=0 i < IOVA_RANGE_CACHE_MAX_SIZE +i){ return;
}
iova_cache_users--;
!ova_cache_users {
cpuhp_remove_multi_state(CPUHP_IOMMU_IOVA_DEAD);
kmem_cache_destroy(iova_cache);
kmem_cache_destroy(iova_magazine_cache);
for_each_possible_cpu(cpu)java.lang.StringIndexOutOfBoundsException: Index 30 out of bounds for length 30
mutex_unlock(&iova_cache_mutex);
}
EXPORT_SYMBOL_GPL(iova_cache_put);
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_DESCRIPTION("IOMMU I/O Virtual Address management");
MODULE_LICENSE("GPL");
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.