/* * arch/sh/mm/pmb.c * * Privileged Space Mapping Buffer (PMB) Support. * * Copyright (C) 2005 - 2011 Paul Mundt * Copyright (C) 2010 Matt Fleming * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details.
*/ #include <linux/init.h> #include <linux/kernel.h> #include <linux/syscore_ops.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/seq_file.h> #include <linux/err.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/pgtable.h> #include <asm/cacheflush.h> #include <linux/sizes.h> #include <linux/uaccess.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/mmu_context.h>
/* * Ensure that the PMB entries match our cache configuration. * * When we are in 32-bit address extended mode, CCR.CB becomes * invalid, so care must be taken to manually adjust cacheable * translations.
*/ static __always_inline unsignedlong pmb_cache_flags(void)
{ unsignedlong flags = 0;
staticbool pmb_mapping_exists(unsignedlong vaddr, phys_addr_t phys, unsignedlong size)
{ int i;
read_lock(&pmb_rwlock);
for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { struct pmb_entry *pmbe, *iter; unsignedlong span;
if (!test_bit(i, pmb_map)) continue;
pmbe = &pmb_entry_list[i];
/* * See if VPN and PPN are bounded by an existing mapping.
*/ if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size))) continue; if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size))) continue;
/* * Now see if we're in range of a simple mapping.
*/ if (size <= pmbe->size) {
read_unlock(&pmb_rwlock); returntrue;
}
span = pmbe->size;
/* * Finally for sizes that involve compound mappings, walk * the chain.
*/ for (iter = pmbe->link; iter; iter = iter->link)
span += iter->size;
/* * Nothing else to do if the range requirements are met.
*/ if (size <= span) {
read_unlock(&pmb_rwlock); returntrue;
}
}
read_unlock(&pmb_rwlock); returnfalse;
}
staticbool pmb_size_valid(unsignedlong size)
{ int i;
for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) if (pmb_sizes[i].size == size) returntrue;
/* * Link adjacent entries that span multiple PMB * entries for easier tear-down.
*/ if (likely(pmbp)) {
raw_spin_lock_nested(&pmbp->lock,
SINGLE_DEPTH_NESTING);
pmbp->link = pmbe;
raw_spin_unlock(&pmbp->lock);
}
pmbp = pmbe;
/* * Instead of trying smaller sizes on every * iteration (even if we succeed in allocating * space), try using pmb_sizes[i].size again.
*/
i--;
mapped++;
raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
} while (size >= SZ_16M);
/* * XXX: This should really start from uncached_end, but this * causes the MMU to reset, so for now we restrict it to the * 0xb000...0xc000 range.
*/
area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
P3SEG, caller); if (!area) return NULL;
/*
 * Tear down the PMB mapping whose virtual base address matches @addr.
 *
 * @addr:	virtual base address previously handed out for a PMB mapping
 *
 * Scans the software entry list under the read lock for an entry whose
 * VPN exactly matches @addr; the unmap itself is then performed outside
 * the lock so that pmb_unmap_entry() can take the write side.
 *
 * Returns 0 on success, or -EINVAL if no mapping starts at @addr.
 */
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}
staticvoid __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{ do { struct pmb_entry *pmblink = pmbe;
/* * We may be called before this pmb_entry has been * entered into the PMB table via set_pmb_entry(), but * that's OK because we've allocated a unique slot for * this entry in pmb_alloc() (even if we haven't filled * it yet). * * Therefore, calling __clear_pmb_entry() is safe as no * other mapping can be using that slot.
*/
__clear_pmb_entry(pmbe);
/* * Sync our software copy of the PMB mappings with those in hardware. The * mappings in the hardware PMB were either set up by the bootloader or * very early on by the kernel.
*/ staticvoid __init pmb_synchronize(void)
{ struct pmb_entry *pmbp = NULL; int i, j;
/* * Run through the initial boot mappings, log the established * ones, and blow away anything that falls outside of the valid * PPN range. Specifically, we only care about existing mappings * that impact the cached/uncached sections. * * Note that touching these can be a bit of a minefield; the boot * loader can establish multi-page mappings with the same caching * attributes, so we need to ensure that we aren't modifying a * mapping that we're presently executing from, or may execute * from in the case of straddling page boundaries. * * In the future we will have to tidy up after the boot loader by * jumping between the cached and uncached mappings and tearing * down alternating mappings while executing from the other.
*/ for (i = 0; i < NR_PMB_ENTRIES; i++) { unsignedlong addr, data; unsignedlong addr_val, data_val; unsignedlong ppn, vpn, flags; unsignedlong irqflags; unsignedint size; struct pmb_entry *pmbe;
for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++) if (pmb_sizes[j].flag == size)
pmbe->size = pmb_sizes[j].size;
if (pmbp) {
raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING); /* * Compare the previous entry against the current one to * see if the entries span a contiguous mapping. If so, * setup the entry links accordingly. Compound mappings * are later coalesced.
*/ if (pmb_can_merge(pmbp, pmbe))
pmbp->link = pmbe;
raw_spin_unlock(&pmbp->lock);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.