/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/*
	 * Flush the queues: one prefetch per 32-byte SQ line
	 * (len >> 5 lines, stepping 8 longwords at a time).
	 */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
/*
 * NOTE(review): fragment — this is the tail of sq_remap(); the chunk is
 * scrambled and sq_remap's header appears further down.  With an MMU the
 * SQ region is mapped via ioremap_page_range(); on failure the vmap area
 * is released and -EAGAIN returned.  Without an MMU, QACR0/QACR1 are
 * loaded directly with the masked physical address.  The glued
 * "unsignedlong" tokens are extraction damage — confirm against the
 * original arch/sh source before relying on this text.
 */
if (ioremap_page_range((unsignedlong)vma->addr,
(unsignedlong)vma->addr + map->size,
vma->phys_addr, prot)) {
vunmap(vma->addr); return -EAGAIN;
} #else /* * Without an MMU (or with it turned off), this is much more * straightforward, as we can just load up each queue's QACR with * the physical address appropriately masked.
*/
__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1); #endif
return 0;
}
/*
 * NOTE(review): fragment — head of sq_remap(); the rest of its body is
 * out of order in this chunk (the mapping/QACR code appears above) and
 * part of it is missing entirely.  "unsignedlong"/"unsignedint"/
 * "constchar" are extraction damage, not real tokens.
 */
/** * sq_remap - Map a physical address through the Store Queues * @phys: Physical address of mapping. * @size: Length of mapping. * @name: User invoking mapping. * @prot: Protection bits. * * Remaps the physical address @phys through the next available store queue * address of @size length. @name is logged at boot time as well as through * the sysfs interface.
*/ unsignedlong sq_remap(unsignedlong phys, unsignedint size, constchar *name, pgprot_t prot)
{ struct sq_mapping *map; unsignedlong end; unsignedint psz; int ret, page;
/* Don't allow wraparound or zero size */
end = phys + size - 1; if (unlikely(!size || end < phys)) return -EINVAL; /* Don't allow anyone to remap normal memory.. */ if (unlikely(phys < virt_to_phys(high_memory))) return -EINVAL;
/*
 * NOTE(review): fragment — head of sq_unmap(); the function is cut off
 * after the bad-address check (the actual unmap/free work is missing
 * from this chunk).
 */
/** * sq_unmap - Unmap a Store Queue allocation * @vaddr: Pre-allocated Store Queue mapping. * * Unmaps the store queue allocation @map that was previously created by * sq_remap(). Also frees up the pte that was previously inserted into * the kernel page table and discards the UTLB translation.
*/ void sq_unmap(unsignedlong vaddr)
{ struct sq_mapping **p, *map; int page;
/* Walk the mapping list looking for the entry whose SQ address is @vaddr */
for (p = &sq_mapping_list; (map = *p); p = &map->next) if (map->sq_addr == vaddr) break;
/* No such mapping: log and bail out rather than unmapping garbage */
if (unlikely(!map)) {
printk("%s: bad store queue address 0x%08lx\n",
__func__, vaddr); return;
}
/* * Needlessly complex sysfs interface. Unfortunately it doesn't seem like * there is any other easy way to add things on a per-cpu basis without * putting the directory entries somewhere stupid and having to create * links in sysfs by hand back in to the per-cpu directories. * * Some day we may want to have an additional abstraction per store * queue, but considering the kobject hell we already have to deal with, * it's simply not worth the trouble.
*/ staticstruct kobject *sq_kobject[NR_CPUS];
/*
 * Extraction artifact: the following German website disclaimer is not
 * part of this source file (translation: "The information on this web
 * page was carefully compiled to the best of our knowledge.  However,
 * neither completeness, correctness, nor quality of the information
 * provided is guaranteed.  Note: the colored syntax display and the
 * measurement are still experimental.").
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */