/* * The DMA mask corresponding to the maximum bus address allocatable * using GFP_DMA. The default here places no restriction on DMA * allocations. This must be the smallest DMA mask in the system, * so a successful GFP_DMA allocation will always satisfy this.
*/
phys_addr_t arm_dma_limit; unsignedlong arm_dma_pfn_limit; #endif
/* * If address less than pageblock_size bytes away from a present * memory chunk there still will be a memory map entry for it * because we round freed memory map to the pageblock boundaries.
*/ if (memblock_overlaps_region(&memblock.memory,
ALIGN_DOWN(addr, pageblock_size),
pageblock_size)) return 1;
/* * sparse_init() tries to allocate memory from memblock, so must be * done after the fixed reservations
*/
sparse_init();
/* * Now free the memory - free_area_init needs * the sparse mem_map arrays initialized by sparse_init() * for memmap_init_zone(), otherwise all PFNs are invalid.
*/
zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}
/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;

	/*
	 * 0xe7fddef0 is a permanently undefined encoding, so any stray
	 * execution of freed init memory faults immediately.
	 * NOTE(review): count is assumed to be a multiple of 4; a
	 * non-multiple would wrap the size_t and overrun — confirm callers.
	 */
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}
#ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */
memblock_phys_free(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); #endif
/* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already.
*/ #ifdef CONFIG_MMU
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
BUG_ON(TASK_SIZE > MODULES_VADDR); #endif
/* * Updates section permissions only for the current mm (sections are * copied into each mm). During startup, this is the init_mm. Is only * safe to be called with preemption disabled, as under stop_machine().
*/ staticinlinevoid section_update(unsignedlong addr, pmdval_t mask,
pmdval_t prot, struct mm_struct *mm)
{
pmd_t *pmd;
/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	/* Pre-ARMv6 cores lack the extended page table format. */
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	/* CR_XP set means the extended (ARMv6) page table format is active. */
	return !!(get_cr() & CR_XP);
}
/*
 * Apply (@set true) or revert (@set false) the permission changes described
 * by the @n entries of @perms to the page tables of @mm, one section at a
 * time. Entries whose start/end are not section-aligned are reported and
 * skipped, since permissions can only be changed at section granularity.
 */
static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	/* Without extended page tables there are no permission bits to set. */
	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear,
				       mm);
	}
}
/* * update_sections_early intended to be called only through stop_machine * framework and executed by only one CPU while all other CPUs will spin and * wait, so no locking is required in this function.
*/ staticvoid update_sections_early(struct section_perm perms[], int n)
{ struct task_struct *t, *s;
for_each_process(t) { if (t->flags & PF_KTHREAD) continue;
for_each_thread(t, s) if (s->mm)
set_section_perms(perms, n, true, s->mm);
}
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
}
#ifdef CONFIG_XIP_KERNEL
/*
 * The XIP kernel text is mapped in the module area for modules and
 * some other stuff to work without any indirect relocations.
 * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
 * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
 */
#undef MODULES_VADDR
/* Round the end of the XIP ROM image up to the next PMD boundary. */
#define MODULES_VADDR	(((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
#endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.