/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs
*/ #define nv50_instmem(p) container_of((p), struct nv50_instmem, base) #include"priv.h"
/* Attempt to allocate BAR2 address-space and map the object
 * into it.  The lock has to be dropped while doing this due
 * to the possibility of recursion for page table allocation.
 *
 * NOTE(review): this is the tail of a function whose signature is not
 * visible in this chunk — presumably nv50_instobj_kmap() (see the call
 * site further down in this file); confirm against the full source.
 */
mutex_unlock(&imem->base.mutex);
/* Request a 4KiB-aligned (shift 12) BAR2 VMM range for the object. */
while ((ret = nvkm_vmm_get(vmm, 12, size, &bar))) {
	/* Evict unused mappings, and keep retrying until we either
	 * succeed, or there's no more objects left on the LRU.
	 */
	mutex_lock(&imem->base.mutex);
	eobj = list_first_entry_or_null(&imem->lru, typeof(*eobj), lru);
	if (eobj) {
		nvkm_debug(subdev, "evict %016llx %016llx @ %016llx\n",
			   nvkm_memory_addr(&eobj->base.memory),
			   nvkm_memory_size(&eobj->base.memory),
			   eobj->bar->addr);
		/* Detach the victim's BAR mapping under the lock; the
		 * actual teardown happens after the lock is dropped.
		 */
		list_del_init(&eobj->lru);
		ebar = eobj->bar;
		eobj->bar = NULL;
		emap = eobj->map;
		eobj->map = NULL;
	}
	mutex_unlock(&imem->base.mutex);
	/* LRU exhausted: give up with ret still holding the error. */
	if (!eobj)
		break;
	/* Tear down the evicted mapping outside the lock (nvkm_vmm_put()
	 * may recurse back into page-table allocation — see note above).
	 */
	iounmap(emap);
	nvkm_vmm_put(vmm, &ebar);
}

if (ret == 0)
	ret = nvkm_memory_map(memory, 0, vmm, bar, NULL, 0);
mutex_lock(&imem->base.mutex);
if (ret || iobj->bar) {
	/* We either failed, or another thread beat us. */
	mutex_unlock(&imem->base.mutex);
	/* Drop the lock around nvkm_vmm_put() for the same recursion
	 * reason as above, then re-take it before returning (the caller
	 * apparently expects the lock held — confirm against full file).
	 */
	nvkm_vmm_put(vmm, &bar);
	mutex_lock(&imem->base.mutex);
	return;
}

/* Make the mapping visible to the host. */
iobj->bar = bar;
iobj->map = ioremap_wc(device->func->resource_addr(device, NVKM_BAR2_INST) +
		       (u32)iobj->bar->addr, size);
if (!iobj->map) {
	nvkm_warn(subdev, "PRAMIN ioremap failed\n");
	/* CPU mapping failed: release the BAR2 VA; iobj->map stays NULL
	 * so accesses fall back to the slow path.
	 */
	nvkm_vmm_put(vmm, &iobj->bar);
}
/* Closing brace of the enclosing (not fully visible) function. */
}
/* NOTE(review): fragment of a release-side routine whose signature is not
 * visible in this chunk — presumably nv50_instobj_release(); confirm.
 */
/* Atomically drop the last map reference and take the mutex only when
 * the count actually hit zero.
 */
if (refcount_dec_and_mutex_lock(&iobj->maps, &imem->base.mutex)) {
	/* Add the now-unused mapping to the LRU instead of directly
	 * unmapping it here, in case we need to map it again later.
	 */
	if (likely(iobj->lru.next) && iobj->map) {
		/* Must not already be on the LRU while it had users. */
		BUG_ON(!list_empty(&iobj->lru));
		list_add_tail(&iobj->lru, &imem->lru);
	}
	/* Switch back to NULL accessors when last map is gone. */
	iobj->base.memory.ptrs = NULL;
	mutex_unlock(&imem->base.mutex);
}
/* Closing brace of the enclosing (not fully visible) function. */
}
/* NOTE(review): fragment of an acquire-side routine whose signature is not
 * visible in this chunk — presumably nv50_instobj_acquire(); confirm.
 */
/* Already mapped?  Lock-free fast path: take a reference if one exists. */
if (refcount_inc_not_zero(&iobj->maps)) {
	/* read barrier match the wmb on refcount set */
	smp_rmb();
	return iobj->map;
}

/* Take the lock, and re-check that another thread hasn't
 * already mapped the object in the meantime.
 */
mutex_lock(&imem->mutex);
if (refcount_inc_not_zero(&iobj->maps)) {
	mutex_unlock(&imem->mutex);
	return iobj->map;
}

/* Attempt to get a direct CPU mapping of the object. */
if ((vmm = nvkm_bar_bar2_vmm(imem->subdev.device))) {
	if (!iobj->map)
		nv50_instobj_kmap(iobj, vmm);
	map = iobj->map;
}

/* Publish the accessors and the first reference; the re-check guards
 * against a racing acquirer having done so while we mapped.
 */
if (!refcount_inc_not_zero(&iobj->maps)) {
	/* Exclude object from eviction while it's being accessed. */
	if (likely(iobj->lru.next))
		list_del_init(&iobj->lru);
	/* Direct mapping available -> fast accessors, else fall back
	 * to the slow (indirect) accessors.
	 */
	if (map)
		iobj->base.memory.ptrs = &nv50_instobj_fast;
	else
		iobj->base.memory.ptrs = &nv50_instobj_slow;
	/* barrier to ensure the ptrs are written before refcount is set */
	smp_wmb();
	refcount_set(&iobj->maps, 1);
}
/* NOTE(review): the mutex_unlock()/return tail of this routine is not
 * visible in this chunk — confirm against the full source.
 */
/* Exclude bootstrapped objects (ie. the page tables for the
 * instmem BAR itself) from eviction.
 *
 * NOTE(review): fragment of a boot-side routine whose signature is not
 * visible in this chunk — presumably nv50_instobj_boot(); confirm.
 */
mutex_lock(&imem->mutex);
if (likely(iobj->lru.next)) {
	list_del_init(&iobj->lru);
	/* Poison lru.next so later code treats this object as
	 * permanently pinned (the "likely(iobj->lru.next)" checks
	 * elsewhere in this file skip NULL).
	 */
	iobj->lru.next = NULL;
}
/* NOTE(review): no matching mutex_unlock() is visible in this chunk —
 * the remainder of this routine lies outside the visible text; confirm.
 */
/* NOTE(review): fragment of a destructor-side routine whose signature is
 * not visible in this chunk — presumably nv50_instobj_dtor(); confirm.
 */
/* Unlink from the eviction LRU and snapshot the mapping state under the
 * lock, so teardown below can run unlocked.
 */
mutex_lock(&imem->mutex);
if (likely(iobj->lru.next))
	list_del(&iobj->lru);
map = iobj->map;
bar = iobj->bar;
mutex_unlock(&imem->mutex);

if (map) {
	struct nvkm_vmm *vmm = nvkm_bar_bar2_vmm(imem->subdev.device);
	iounmap(map);
	if (likely(vmm)) /* Can be NULL during BAR destructor. */
		nvkm_vmm_put(vmm, &bar);
}
/* NOTE(review): stray non-code text (a German website disclaimer) was fused
 * into this file by whatever extraction produced it; it is not part of the
 * driver.  English translation preserved for reference:
 *
 * "The information on this website has been compiled with care and to the
 * best of our knowledge.  However, no guarantee is given of the
 * completeness, correctness, or quality of the information provided.
 * Remark: the coloured syntax highlighting and the measurement are still
 * experimental."
 */