/*
 * Weak default: overridden by MMU flavours that support memory hotplug.
 * Returning -ENODEV signals that a linear mapping cannot be created here.
 * (Fixed: "unsignedlong" was a fused token and would not compile.)
 */
int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}
/*
 * Weak default counterpart to create_section_mapping(); MMU flavours that
 * support hotplug override it. -ENODEV means "no mapping to remove here".
 * (Fixed: "unsignedlong" was a fused token and would not compile.)
 */
int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}
/*
 * Create the kernel linear mapping for a hotplugged physical memory range.
 *
 * @nid:    NUMA node the new memory belongs to
 * @start:  physical start address of the range
 * @size:   size of the range in bytes
 * @params: hotplug parameters; only @params->pgprot is consumed here
 *
 * The section mapping is created under linear_mapping_mutex. On failure the
 * underlying error code is logged but -EFAULT is returned to the caller.
 * (Fixed: fused "unsignedlong" token; statements were collapsed onto shared
 * lines by extraction.)
 */
int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid, params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}
/*
 * Tear down the kernel linear mapping for a hot-removed physical range.
 *
 * @start: physical start address of the range
 * @size:  size of the range in bytes
 *
 * Failure to remove the section mapping is only warned about — there is no
 * error return for memory removal at this point.
 * (Fixed: fused "unsignedlong" token; collapsed lines restored.)
 */
void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/*
	 * Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}
/* * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need * updating.
*/ staticvoid update_end_of_memory_vars(u64 start, u64 size)
{ unsignedlong end_pfn = PFN_UP(start + size);
/* * Zones usage: * * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be * everything else. GFP_DMA32 page allocations automatically fall back to * ZONE_DMA. * * By using 31-bit unconditionally, we can exploit zone_dma_limit to inform the * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by * ZONE_DMA.
*/ staticunsignedlong max_zone_pfns[MAX_NR_ZONES];
/* * paging_init() sets up the page tables - in fact we've already done this.
*/ void __init paging_init(void)
{ unsignedlonglong total_ram = memblock_phys_mem_size();
phys_addr_t top_of_ram = memblock_end_of_DRAM(); int zone_dma_bits;
#ifdef CONFIG_HIGHMEM unsignedlong v = __fix_to_virt(FIX_KMAP_END); unsignedlong end = __fix_to_virt(FIX_KMAP_BEGIN);
for (; v < end; v += PAGE_SIZE)
map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
(unsignedlonglong)top_of_ram, total_ram);
printk(KERN_DEBUG "Memory hole size: %ldMB\n",
(longint)((top_of_ram - total_ram) >> 20));
/* * Allow 30-bit DMA for very limited Broadcom wifi chips on many * powerbooks.
*/ if (IS_ENABLED(CONFIG_PPC32))
zone_dma_bits = 30; else
zone_dma_bits = 31;
/*
 * Early arch-specific MM initialisation: sanity-check page-size encoding,
 * set up swiotlb (forcing bottom-up memblock so its buffer is DMA-able),
 * finish KASAN init, and seed next_tlbcam_idx on non-SMP e500.
 * (Fixed: "#ifdefined" was a fused token — the preprocessor would reject it —
 * and collapsed lines were restored.)
 */
void __init arch_mm_preinit(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

	kasan_late_init();

#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif
}
/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	/*
	 * NOTE(review): the extracted source lacked the memblock iteration,
	 * the declaration of 'res', and the 'return 0;' — 'res' was used
	 * undeclared and the non-void function fell off the end. Restored
	 * per upstream arch/powerpc/mm/mem.c; confirm against the tree.
	 */
	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(insert_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 *
 * Returns 1 when access is permitted (RTAS user buffer, or any page that is
 * not kernel RAM), 0 otherwise (exclusive iomem, or kernel RAM).
 * (Fixed: fused "unsignedlong" token; the whole body had been collapsed onto
 * a single line.)
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
/*
 * This is defined in kernel/resource.c but only powerpc needs to export it,
 * for the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is
 * removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);
/* * BOOK3S_32 and 8xx define MODULES_VADDR for text allocations and * allow allocating data in the entire vmalloc space
*/ #ifdef MODULES_VADDR unsignedlong limit = (unsignedlong)_etext - SZ_32M;
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
/* First try within 32M limit from _etext to avoid branch trampolines */ if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit) {
start = limit;
fallback_start = MODULES_VADDR;
fallback_end = MODULES_END;
} else {
start = MODULES_VADDR;
}
end = MODULES_END; #else
start = VMALLOC_START;
end = VMALLOC_END; #endif
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.