if ((bdiff % subreg) || (sdiff % subreg)) returnfalse;
bslots = bdiff / subreg;
sslots = sdiff / subreg;
if (bslots || sslots) { int i;
if (subreg < PMSAv7_MIN_SUBREG_SIZE) returnfalse;
if (bslots + sslots > PMSAv7_NR_SUBREGS) returnfalse;
for (i = 0; i < bslots; i++)
_set_bit(i, ®ion->subreg);
for (i = 1; i <= sslots; i++)
_set_bit(PMSAv7_NR_SUBREGS - i, ®ion->subreg);
}
region->base = abase;
region->size = p2size;
returntrue;
}
/*
 * allocate_region() - greedily split the range [base, base + size) into
 * MPU regions written to regions[], returning how many were used.
 *
 * NOTE(review): this chunk appears damaged by text extraction:
 *  - keywords lost separating spaces ("staticint", "unsignedint" below);
 *  - "&r" sequences were mis-encoded as the registered-trademark sign
 *    ("®ions[count]" at the while-loop, "®_start" further down) —
 *    presumably "&regions[count]" / "&reg_start"; TODO confirm against
 *    the pristine file;
 *  - from the mem_max_regions / memblock fragments below, the body of at
 *    least one other function (a lowmem-bounds adjustment routine) seems
 *    to have been spliced into this one, so the braces do not balance.
 * Restore from the original source before relying on this function.
 */
staticint __init allocate_region(phys_addr_t base, phys_addr_t size, unsignedint limit, struct region *regions)
{ int count = 0;
phys_addr_t diff = size; int attempts = MPU_MAX_REGIONS;
/* Peel regions off the front of the range until nothing remains. */
while (diff) { /* Try cover region as is (maybe with help of subregions) */ if (try_split_region(base, size, ®ions[count])) {
count++;
base += size;
diff -= size;
size = diff;
} else { /* * Maximum aligned region might overflow phys_addr_t * if "base" is 0. Hence we keep everything below 4G * until we take the smaller of the aligned region * size ("asize") and rounded region size ("p2size"), * one of which is guaranteed to be smaller than the * maximum physical address.
*/
/* Largest size for which "base" stays naturally aligned (minus 1). */
phys_addr_t asize = (base - 1) ^ base;
/* Largest power-of-two not exceeding the remaining length (minus 1). */
phys_addr_t p2size = (1 << __fls(diff)) - 1;
/*
 * NOTE(review): splice point — the lines from here to the closing
 * braces below belong to a different function (region budgeting,
 * XIP ROM coverage, and the first-memory-bank checks); they are not
 * part of allocate_region() upstream.
 */
/* We need to keep one slot for background region */
mem_max_regions--;
#ifndef CONFIG_CPU_V7M /* ... and one for vectors */
mem_max_regions--; #endif
#ifdef CONFIG_XIP_KERNEL /* plus some regions to cover XIP ROM */
num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
mem_max_regions, xip);
/* * Initially only use memory continuous from
* PHYS_OFFSET */ if (reg_start != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
/* Record the first bank; only contiguous-from-PHYS_OFFSET memory is kept. */
mem_start = reg_start;
mem_end = reg_end;
specified_mem_size = mem_end - mem_start;
first = false;
} else { /* * memblock auto merges contiguous blocks, remove * all blocks afterwards in one go (we can't remove * blocks separately while iterating)
*/
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
&mem_end, ®_start);
/* Drop everything from reg_start upward in a single call. */
memblock_remove(reg_start, 0 - reg_start); break;
}
}
/* Carve the chosen memory window into MPU regions. */
memset(mem, 0, sizeof(mem));
num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);
/* NOTE(review): "unsignedlong" below is another fused keyword from extraction. */
for (i = 0; i < num; i++) { unsignedlong subreg = mem[i].size / PMSAv7_NR_SUBREGS;
/* Warn and trim memblock if MPU constraints forced us to cover less RAM. */
if (total_mem_size != specified_mem_size) {
pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
&specified_mem_size, &total_mem_size);
memblock_remove(mem_start + total_mem_size,
specified_mem_size - total_mem_size);
}
}
/*
 * NOTE(review): declared as __mpu_max_regions() but the visible body is
 * spliced from (at least) three routines: the I/D region-count query
 * (comment below), a minimum-region-order probe via the DRBAR write-back
 * trick, and a region-programming routine using undeclared parameters
 * (number, start, properties, size_order, subregions, need_flush).
 * "staticint" is a fused keyword from extraction damage.  Restore from
 * the original source before relying on this function.
 */
staticint __init __mpu_max_regions(void)
{ /* * We don't support a different number of I/D side regions so if we * have separate instruction and data memory maps then return * whichever side has a smaller number of supported regions.
*/
u32 dregions, iregions, mpuir;
/* NOTE(review): splice point — minimum-region-order probing starts here. */
/* We've kept a region free for this probing */
rgnr_write(PMSAv7_PROBE_REGION);
isb(); /* * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum * region order
*/
drbar_write(0xFFFFFFFC);
/* Read back what the hardware actually latched: low bits it cannot
 * hold reveal the minimum supported region alignment/order. */
drbar_result = irbar_result = drbar_read();
drbar_write(0x0); /* If the MPU is non-unified, we use the larger of the two minima*/ if (mpu_iside_independent()) {
irbar_write(0xFFFFFFFC);
irbar_result = irbar_read();
irbar_write(0x0);
}
isb(); /* Ensure that MPU region operations have completed */ /* Return whichever result is larger */
/* NOTE(review): splice point — region-programming body starts here;
 * its enclosing function signature is not visible in this chunk. */
/* We kept a region free for probing resolution of MPU regions*/ if (number > mpu_max_regions
|| number >= MPU_MAX_REGIONS) return -ENOENT;
/* Reject sizes the hardware cannot express or that are below the minimum. */
if (size_order > 32) return -ENOMEM;
if (size_order < mpu_min_region_order) return -ENOMEM;
/* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
size_data = ((size_order - 1) << PMSAv7_RSR_SZ) | 1 << PMSAv7_RSR_EN;
size_data |= subregions << PMSAv7_RSR_SD;
/* Caller requested a cache flush before remapping (see need_flush setup). */
if (need_flush)
flush_cache_all();
dsb(); /* Ensure all previous data accesses occur with old mappings */
/* Select the region, then program base, attributes, and size+enable. */
rgnr_write(number);
isb();
drbar_write(start);
dracr_write(properties);
isb(); /* Propagate properties before enabling region */
drsr_write(size_data);
/* Check for independent I-side registers */ if (mpu_iside_independent()) {
irbar_write(start);
iracr_write(properties);
isb();
irsr_write(size_data);
}
isb();
/* Store region info (we treat i/d side the same, so only store d) */
mpu_rgn_info.rgns[number].dracr = properties;
mpu_rgn_info.rgns[number].drbar = start;
mpu_rgn_info.rgns[number].drsr = size_data;
mpu_rgn_info.used++;
return 0;
}
/* * Set up default MPU regions, doing nothing if there is no MPU
*/ void __init pmsav7_setup(void)
{ int i, region = 0, err = 0;
/*
 * NOTE(review): the function body is truncated here by unrelated text
 * appended to the file — the XIP loop below is never closed.  Restore
 * the remainder from the original source.
 */
#ifdef CONFIG_XIP_KERNEL /* ROM */ for (i = 0; i < ARRAY_SIZE(xip); i++) { /* * In case we overwrite RAM region we set earlier in * head-nommu.S (which is cachable) all subsequent * data access till we setup RAM bellow would be done * with BG region (which is uncachable), thus we need * to clean and invalidate cache.
*/ bool need_flush = region == PMSAv7_RAM_REGION;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.