// SPDX-License-Identifier: GPL-2.0-or-later /* * Procedures for interfacing to Open Firmware. * * Paul Mackerras August 1996. * Copyright (C) 1996-2005 Paul Mackerras. * * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. * {engebret|bergner}@us.ibm.com
*/
#undef DEBUG_PROM
/* we cannot use FORTIFY as it brings in new symbols */ #define __NO_FORTIFY
/* All of prom_init bss lives here */ #define __prombss __section(".bss.prominit")
/* * Eventually bump that one up
*/ #define DEVTREE_CHUNK_SIZE 0x100000
/* * This is the size of the local memory reserve map that gets copied * into the boot params passed to the kernel. That size is totally * flexible as the kernel just reads the list until it encounters an * entry with size 0, so it can be changed without breaking binary * compatibility
*/ #define MEM_RESERVE_MAP_SIZE 8
/* * prom_init() is called very early on, before the kernel text * and data have been mapped to KERNELBASE. At this point the code * is running at whatever address it has been loaded at. * On ppc32 we compile with -mrelocatable, which means that references * to extern and static variables get relocated automatically. * ppc64 objects are always relocatable, we just need to relocate the * TOC. * * Because OF may have mapped I/O devices into the area starting at * KERNELBASE, particularly on CHRP machines, we can't safely call * OF once the kernel has been mapped to KERNELBASE. Therefore all * OF calls must be done within prom_init(). * * ADDR is used in calls to call_prom. The 4th and following * arguments to call_prom should be 32-bit values. * On ppc64, 64 bit values are truncated to 32 bits (and * fortunately don't get interpreted as two arguments).
*/ #define ADDR(x) (u32)(unsignedlong)(x)
/* Platforms codes are now obsolete in the kernel. Now only used within this * file and ultimately gone too. Feel free to change them if you need, they * are not shared with anything outside of this file anymore
*/ #define PLATFORM_PSERIES 0x0100 #define PLATFORM_PSERIES_LPAR 0x0101 #define PLATFORM_LPAR 0x0001 #define PLATFORM_POWERMAC 0x0400 #define PLATFORM_GENERIC 0x0500
/* * Error results ... some OF calls will return "-1" on error, some * will return 0, some will return either. To simplify, here are * macros to use with any ihandle or phandle return value to check if * it is valid
*/
// Copy up to n bytes for (i = 0; i < n && src[i] != '\0'; i++)
dest[i] = src[i];
rc = i;
// If we copied all n then we have run out of space for the nul if (rc == n) { // Rewind by one character to ensure nul termination
i--;
rc = -E2BIG;
}
for (p = msg; *p != 0; p = q) { for (q = p; *q != 0 && *q != '\n'; ++q)
; if (q > p)
call_prom("write", 3, 1, prom.stdout, p, q - p); if (*q == 0) break;
++q;
call_prom("write", 3, 1, prom.stdout, ADDR("\r\n"), 2);
}
}
/*
 * Both prom_print_hex & prom_print_dec take an unsigned long as input so
 * that we do not need __udivdi3 or __umoddi3 on 32 bits.
 */

/*
 * Write @val to the OF console as a fixed-width, zero-padded,
 * lower-case hexadecimal number (2 * sizeof(unsigned long) digits).
 */
static void __init prom_print_hex(unsigned long val)
{
	char buf[sizeof(val) * 2 + 1];
	int pos = sizeof(val) * 2;

	buf[pos] = '\0';
	/* Fill from the least-significant nibble backwards */
	while (pos-- > 0) {
		buf[pos] = "0123456789abcdef"[val & 0xf];
		val >>= 4;
	}
	call_prom("write", 3, 1, prom.stdout, buf, sizeof(val) * 2);
}
/* max number of decimal digits in an unsigned long */ #define UL_DIGITS 21 staticvoid __init prom_print_dec(unsignedlong val)
{ int i, size; char buf[UL_DIGITS+1];
for (i = UL_DIGITS-1; i >= 0; i--) {
buf[i] = (val % 10) + '0';
val = val/10; if (val == 0) break;
} /* shift stuff down */
size = UL_DIGITS - i;
call_prom("write", 3, 1, prom.stdout, buf+i, size);
}
/*
 * Minimal printf() to the OF console.  Supports %s, %x, %u and %d with
 * optional 'l'/'ll' length modifiers, and translates '\n' into "\r\n".
 * Literal runs between conversions are written verbatim.
 *
 * Fix: the length-modifier count @n is now reset before each
 * conversion.  Previously a single %lx left n == 1 for the rest of the
 * format string, so every later %x/%u/%d pulled a long from the va_list
 * (undefined behavior when the caller passed an int).
 */
__printf(1, 2)
static void __init prom_printf(const char *format, ...)
{
	const char *p, *q, *s;
	va_list args;
	unsigned long v;
	long vs;
	int n = 0;

	va_start(args, format);
	for (p = format; *p != 0; p = q) {
		/* emit the literal run up to the next '\n' or '%' */
		for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
			;
		if (q > p)
			call_prom("write", 3, 1, prom.stdout, p, q - p);
		if (*q == 0)
			break;
		if (*q == '\n') {
			++q;
			call_prom("write", 3, 1, prom.stdout,
				  ADDR("\r\n"), 2);
			continue;
		}
		++q;
		if (*q == 0)
			break;
		/* count 'l' modifiers for this conversion only */
		n = 0;
		while (*q == 'l') {
			++q;
			++n;
		}
		switch (*q) {
		case 's':
			++q;
			s = va_arg(args, const char *);
			prom_print(s);
			break;
		case 'x':
			++q;
			switch (n) {
			case 0:
				v = va_arg(args, unsigned int);
				break;
			case 1:
				v = va_arg(args, unsigned long);
				break;
			case 2:
			default:
				v = va_arg(args, unsigned long long);
				break;
			}
			prom_print_hex(v);
			break;
		case 'u':
			++q;
			switch (n) {
			case 0:
				v = va_arg(args, unsigned int);
				break;
			case 1:
				v = va_arg(args, unsigned long);
				break;
			case 2:
			default:
				v = va_arg(args, unsigned long long);
				break;
			}
			prom_print_dec(v);
			break;
		case 'd':
			++q;
			switch (n) {
			case 0:
				vs = va_arg(args, int);
				break;
			case 1:
				vs = va_arg(args, long);
				break;
			case 2:
			default:
				vs = va_arg(args, long long);
				break;
			}
			/* print sign separately; magnitude via the dec helper */
			if (vs < 0) {
				prom_print("-");
				vs = -vs;
			}
			prom_print_dec(vs);
			break;
		}
	}
	va_end(args);
}
if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) { /* * Old OF requires we claim physical and virtual separately * and then map explicitly (assuming virtual mode)
*/ int ret;
prom_arg_t result;
ret = call_prom_ret("call-method", 5, 2, &result,
ADDR("claim"), prom.memory,
align, size, virt); if (ret != 0 || result == -1) return -1;
ret = call_prom_ret("call-method", 5, 2, &result,
ADDR("claim"), prom.mmumap,
align, size, virt); if (ret != 0) {
call_prom("call-method", 4, 1, ADDR("release"),
prom.memory, size, virt); return -1;
} /* the 0x12 is M (coherence) + PP == read/write */
call_prom("call-method", 6, 1,
ADDR("map"), prom.mmumap, 0x12, size, virt, virt); return virt;
} return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
(prom_arg_t)align);
}
staticvoid __init __attribute__((noreturn)) prom_panic(constchar *reason)
{
prom_print(reason); /* Do not call exit because it clears the screen on pmac
* it also causes some sort of double-fault on early pmacs */ if (of_platform == PLATFORM_POWERMAC) asm("trap\n");
/* ToDo: should put up an SRC here on pSeries */
call_prom("exit", 0, 0);
/*
 * Parse an unsigned integer from @cp, auto-detecting the radix from a
 * "0" (octal) or "0x"/"0X" (hex) prefix, defaulting to decimal.
 * If @endp is non-NULL it receives a pointer to the first unconsumed
 * character.
 */
static unsigned long __init prom_strtoul(const char *cp, const char **endp)
{
	unsigned long ret = 0;
	unsigned long radix = 10;

	if (*cp == '0') {
		cp++;
		radix = 8;
		if (prom_toupper(*cp) == 'X') {
			cp++;
			radix = 16;
		}
	}

	for (; prom_isxdigit(*cp); cp++) {
		unsigned long digit;

		if (prom_isdigit(*cp))
			digit = *cp - '0';
		else
			digit = prom_toupper(*cp) - 'A' + 10;
		/* a digit outside the radix terminates the number */
		if (digit >= radix)
			break;
		ret = ret * radix + digit;
	}

	if (endp)
		*endp = cp;

	return ret;
}
staticunsignedlong __init prom_memparse(constchar *ptr, constchar **retptr)
{ unsignedlong ret = prom_strtoul(ptr, retptr); int shift = 0;
/* * We can't use a switch here because GCC *may* generate a * jump table which won't work, because we're not running at * the address we're linked at.
*/ if ('G' == **retptr || 'g' == **retptr)
shift = 30;
/* * Early parsing of the command line passed to the kernel, used for * "mem=x" and the options that affect the iommu
*/ staticvoid __init early_cmdline_parse(void)
{ constchar *opt;
char *p; int l = 0;
prom_cmd_line[0] = 0;
p = prom_cmd_line;
if (!IS_ENABLED(CONFIG_CMDLINE_FORCE) && (long)prom.chosen > 0)
l = prom_getprop(prom.chosen, "bootargs", p, COMMAND_LINE_SIZE-1);
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) || l <= 0 || p[0] == '\0')
prom_strlcat(prom_cmd_line, " " CONFIG_CMDLINE, sizeof(prom_cmd_line));
prom_printf("command line: %s\n", prom_cmd_line);
#ifdef CONFIG_PPC64
opt = prom_strstr(prom_cmd_line, "iommu="); if (opt) {
prom_printf("iommu opt is: %s\n", opt);
opt += 6; while (*opt && *opt == ' ')
opt++; if (!prom_strncmp(opt, "off", 3))
prom_iommu_off = 1; elseif (!prom_strncmp(opt, "force", 5))
prom_iommu_force_on = 1;
} #endif
opt = prom_strstr(prom_cmd_line, "mem="); if (opt) {
opt += 4;
prom_memory_limit = prom_memparse(opt, (constchar **)&opt); #ifdef CONFIG_PPC64 /* Align down to 16 MB which is large page size with hash page translation */
prom_memory_limit = ALIGN_DOWN(prom_memory_limit, SZ_16M); #endif
}
#ifdef CONFIG_PPC_PSERIES /* * The architecture vector has an array of PVR mask/value pairs, * followed by # option vectors - 1, followed by the option vectors. * * See prom.h for the definition of the bits specified in the * architecture vector.
*/
/* Firmware expects the value to be n - 1, where n is the # of vectors */ #define NUM_VECTORS(n) ((n) - 1)
/* * Firmware expects 1 + n - 2, where n is the length of the option vector in * bytes. The 1 accounts for the length byte itself, the - 2 .. ?
*/ #define VECTOR_LENGTH(n) (1 + (n) - 2)
/* Pick up th first CPU node we can find */ for (node = 0; prom_next_node(&node); ) {
type[0] = 0;
prom_getprop(node, "device_type", type, sizeof(type));
if (prom_strcmp(type, "cpu")) continue; /* * There is an entry for each smt thread, each entry being * 4 bytes long. All cpus should have the same number of * smt threads, so return after finding the first.
*/
plen = prom_getproplen(node, "ibm,ppc-interrupt-server#s"); if (plen == PROM_ERROR) break;
plen >>= 2;
prom_debug("Found %lu smt threads per core\n", (unsignedlong)plen);
/* Sanity check */ if (plen < 1 || plen > 64) {
prom_printf("Threads per core %lu out of bounds, assuming 1\n",
(unsignedlong)plen); return 1;
} return plen;
}
prom_debug("No threads found, assuming 1 per core\n");
return 1;
}
/*
 * Decode the MMU byte of the firmware's ibm,arch-vec-5-platform-support
 * answer into @support, honouring the "disable_radix" command-line
 * request only when the firmware actually offers a choice.
 */
static void __init prom_parse_mmu_model(u8 val, struct platform_support *support)
{
	if (val == OV5_FEAT(OV5_MMU_DYNAMIC) ||
	    val == OV5_FEAT(OV5_MMU_EITHER)) {
		/* Either available: the command line gets the final say */
		prom_debug("MMU - either supported\n");
		support->radix_mmu = !prom_radix_disable;
		support->hash_mmu = true;
	} else if (val == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Only Radix */
		prom_debug("MMU - radix only\n");
		if (prom_radix_disable) {
			/*
			 * If we __have__ to do radix, we're better off ignoring
			 * the command line rather than not booting.
			 */
			prom_printf("WARNING: Ignoring cmdline option disable_radix\n");
		}
		support->radix_mmu = true;
	} else if (val == OV5_FEAT(OV5_MMU_HASH)) {
		prom_debug("MMU - hash only\n");
		support->hash_mmu = true;
	} else {
		prom_debug("Unknown mmu support option: 0x%x\n", val);
	}
}
/*
 * Decode the XIVE byte of the firmware's ibm,arch-vec-5-platform-support
 * answer into @support, honouring "xive=off" only when the firmware
 * offers a choice of interrupt-controller modes.
 */
static void __init prom_parse_xive_model(u8 val, struct platform_support *support)
{
	if (val == OV5_FEAT(OV5_XIVE_EITHER)) {
		/* Either available: the command line gets the final say */
		prom_debug("XIVE - either mode supported\n");
		support->xive = !prom_xive_disable;
	} else if (val == OV5_FEAT(OV5_XIVE_EXPLOIT)) {
		/* Only exploitation mode */
		prom_debug("XIVE - exploitation mode supported\n");
		if (prom_xive_disable) {
			/*
			 * If we __have__ to do XIVE, we're better off ignoring
			 * the command line rather than not booting.
			 */
			prom_printf("WARNING: Ignoring cmdline option xive=off\n");
		}
		support->xive = true;
	} else if (val == OV5_FEAT(OV5_XIVE_LEGACY)) {
		/* Only legacy mode */
		prom_debug("XIVE - legacy mode supported\n");
	} else {
		prom_debug("Unknown xive support option: 0x%x\n", val);
	}
}
/* * First copy the architecture vec template * * use memcpy() instead of *vec = *vec_template so that GCC replaces it * by __memcpy() when KASAN is active
*/
memcpy(&ibm_architecture_vec, &ibm_architecture_vec_template, sizeof(ibm_architecture_vec));
/* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */
prom_check_platform_support();
root = call_prom("open", 1, 1, ADDR("/")); if (root != 0) { /* We need to tell the FW about the number of cores we support. * * To do that, we count the number of threads on the first core * (we assume this is the same for all cores) and use it to * divide NR_CPUS.
*/
cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads());
prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n",
cores, NR_CPUS);
/* try calling the ibm,client-architecture-support method */
prom_printf("Calling ibm,client-architecture-support..."); if (call_prom_ret("call-method", 3, 2, &ret,
ADDR("ibm,client-architecture-support"),
root,
ADDR(&ibm_architecture_vec)) == 0) { /* the call exists... */ if (ret)
prom_printf("\nWARNING: ibm,client-architecture" "-support call FAILED!\n");
call_prom("close", 1, 0, root);
prom_printf(" done\n"); return;
}
call_prom("close", 1, 0, root);
prom_printf(" not implemented\n");
}
#ifdef __BIG_ENDIAN__
{
ihandle elfloader;
/* no ibm,client-architecture-support call, try the old way */
elfloader = call_prom("open", 1, 1,
ADDR("/packages/elf-loader")); if (elfloader == 0) {
prom_printf("couldn't open /packages/elf-loader\n"); return;
}
call_prom("call-method", 3, 1, ADDR("process-elf-header"),
elfloader, ADDR(&fake_elf));
call_prom("close", 1, 0, elfloader);
} #endif/* __BIG_ENDIAN__ */
} #endif/* CONFIG_PPC_PSERIES */
/* * Memory allocation strategy... our layout is normally: * * at 14Mb or more we have vmlinux, then a gap and initrd. In some * rare cases, initrd might end up being before the kernel though. * We assume this won't override the final kernel at 0, we have no * provision to handle that in this version, but it should hopefully * never happen. * * alloc_top is set to the top of RMO, eventually shrink down if the * TCEs overlap * * alloc_bottom is set to the top of kernel/initrd * * from there, allocations are done this way : rtas is allocated * topmost, and the device-tree is allocated from the bottom. We try * to grow the device-tree allocation as we progress. If we can't, * then we fail, we don't currently have a facility to restart * elsewhere, but that shouldn't be necessary. * * Note that calls to reserve_mem have to be done explicitly, memory * allocated with either alloc_up or alloc_down isn't automatically * reserved.
*/
/* * Allocates memory in the RMO upward from the kernel/initrd * * When align is 0, this is a special case, it means to allocate in place * at the current location of alloc_bottom or fail (that is basically * extending the previous allocation). Used for the device-tree flattening
*/ staticunsignedlong __init alloc_up(unsignedlong size, unsignedlong align)
{ unsignedlong base = alloc_bottom; unsignedlong addr = 0;
if (align)
base = ALIGN(base, align);
prom_debug("%s(%lx, %lx)\n", __func__, size, align); if (ram_top == 0)
prom_panic("alloc_up() called with mem not initialized\n");
if (align)
base = ALIGN(alloc_bottom, align); else
base = alloc_bottom;
/* * Allocates memory downward, either from top of RMO, or if highmem * is set, from the top of RAM. Note that this one doesn't handle * failures. It does claim memory if highmem is not set.
*/ staticunsignedlong __init alloc_down(unsignedlong size, unsignedlong align, int highmem)
{ unsignedlong base, addr = 0;
prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align,
highmem ? "(high)" : "(low)"); if (ram_top == 0)
prom_panic("alloc_down() called with mem not initialized\n");
if (highmem) { /* Carve out storage for the TCE table. */
addr = ALIGN_DOWN(alloc_top_high - size, align); if (addr <= alloc_bottom) return 0; /* Will we bump into the RMO ? If yes, check out that we * didn't overlap existing allocations there, if we did, * we are dead, we must be the first in town !
*/ if (addr < rmo_top) { /* Good, we are first */ if (alloc_top == rmo_top)
alloc_top = rmo_top = addr; else return 0;
}
alloc_top_high = addr; goto bail;
}
base = ALIGN_DOWN(alloc_top - size, align); for (; base > alloc_bottom;
base = ALIGN_DOWN(base - 0x100000, align)) {
prom_debug(" trying: 0x%lx\n\r", base);
addr = (unsignedlong)prom_claim(base, size, 0); if (addr != PROM_ERROR && addr != 0) break;
addr = 0;
} if (addr == 0) return 0;
alloc_top = addr;
/* Ignore more than 2 cells */ while (s > sizeof(unsignedlong) / 4) {
p++;
s--;
}
r = be32_to_cpu(*p++); #ifdef CONFIG_PPC64 if (s > 1) {
r <<= 32;
r |= be32_to_cpu(*(p++));
} #endif
*cellp = p; return r;
}
/* * Very dumb function for adding to the memory reserve list, but * we don't need anything smarter at this point * * XXX Eventually check for collisions. They should NEVER happen. * If problems seem to show up, it would be a good start to track * them down.
*/ staticvoid __init reserve_mem(u64 base, u64 size)
{
u64 top = base + size; unsignedlong cnt = mem_reserve_cnt;
if (size == 0) return;
/* We need to always keep one empty entry so that we * have our terminator with "size" set to 0 since we are * dumb and just copy this entire array to the boot params
*/
base = ALIGN_DOWN(base, PAGE_SIZE);
top = ALIGN(top, PAGE_SIZE);
size = top - base;
/* * Initialize memory allocation mechanism, parse "memory" nodes and * obtain that way the top of memory and RMO to setup out local allocator
*/ staticvoid __init prom_init_mem(void)
{
phandle node; char type[64]; unsignedint plen;
cell_t *p, *endp;
__be32 val;
u32 rac, rsc;
/* * We iterate the memory nodes to find * 1) top of RMO (first node) * 2) top of memory
*/
val = cpu_to_be32(2);
prom_getprop(prom.root, "#address-cells", &val, sizeof(val));
rac = be32_to_cpu(val);
val = cpu_to_be32(1);
prom_getprop(prom.root, "#size-cells", &val, sizeof(rsc));
rsc = be32_to_cpu(val);
prom_debug("root_addr_cells: %x\n", rac);
prom_debug("root_size_cells: %x\n", rsc);
if (type[0] == 0) { /* * CHRP Longtrail machines have no device_type * on the memory node, so check the name instead...
*/
prom_getprop(node, "name", type, sizeof(type));
} if (prom_strcmp(type, "memory")) continue;
plen = prom_getprop(node, "reg", regbuf, sizeof(regbuf)); if (plen > sizeof(regbuf)) {
prom_printf("memory node too large for buffer !\n");
plen = sizeof(regbuf);
}
p = regbuf;
endp = p + (plen / sizeof(cell_t));
/* * If prom_memory_limit is set we reduce the upper limits *except* for * alloc_top_high. This must be the real top of RAM so we can put * TCE's up there.
*/
/* * Setup our top alloc point, that is top of RMO or top of * segment 0 when running non-LPAR. * Some RS64 machines have buggy firmware where claims up at * 1GB fail. Cap at 768MB as a workaround. * Since 768MB is plenty of room, and we need to cap to something * reasonable on 32-bit, cap at 768MB on all machines.
*/ if (!rmo_top)
rmo_top = ram_top;
rmo_top = min(0x30000000ul, rmo_top);
alloc_top = rmo_top;
alloc_top_high = ram_top;
/* * Check if we have an initrd after the kernel but still inside * the RMO. If we do move our bottom point to after it.
*/ if (prom_initrd_start &&
prom_initrd_start < rmo_top &&
prom_initrd_end > alloc_bottom)
alloc_bottom = PAGE_ALIGN(prom_initrd_end);
/* * Even though we read what OF wants, we just set the table * size to 4 MB. This is enough to map 2GB of PCI DMA space. * By doing this, we avoid the pitfalls of trying to DMA to * MMIO space and the DMA alias hole.
*/
minsize = 4UL << 20;
/* Align to the greater of the align or size */
align = max(minalign, minsize);
base = alloc_down(minsize, align, 1); if (base == 0)
prom_panic("ERROR, cannot find space for TCE table.\n"); if (base < local_alloc_bottom)
local_alloc_bottom = base;
/* It seems OF doesn't null-terminate the path :-( */
memset(path, 0, sizeof(prom_scratch)); /* Call OF to setup the TCE hardware */ if (call_prom("package-to-path", 3, 1, node,
path, sizeof(prom_scratch) - 1) == PROM_ERROR) {
prom_printf("package-to-path failed\n");
}
/* Save away the TCE table attributes for later use. */
prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
/* Initialize the table to have a one-to-one mapping * over the allocated size.
*/
tce_entryp = (u64 *)base; for (i = 0; i < (minsize >> 3) ;tce_entryp++, i++) {
tce_entry = (i << PAGE_SHIFT);
tce_entry |= 0x3;
*tce_entryp = tce_entry;
}
/* These are only really needed if there is a memory limit in
* effect, but we don't know so export them always. */
prom_tce_alloc_start = local_alloc_bottom;
prom_tce_alloc_end = local_alloc_top;
/* Flag the first invalid entry */
prom_debug("ending prom_initialize_tce_table\n");
} #endif/* __BIG_ENDIAN__ */ #endif/* CONFIG_PPC64 */
/* * With CHRP SMP we need to use the OF to start the other processors. * We can't wait until smp_boot_cpus (the OF is trashed by then) * so we have to put the processors into a holding pattern controlled * by the kernel (not OF) before we destroy the OF. * * This uses a chunk of low memory, puts some holding pattern * code there and sends the other processors off to there until * smp_boot_cpus tells them to do something. The holding pattern * checks that address until its cpu # is there, when it is that * cpu jumps to __secondary_start(). smp_boot_cpus() takes care * of setting those values. * * We also use physical address 0x4 here to tell when a cpu * is in its holding pattern code. * * -- Cort
*/ /* * We want to reference the copy of __secondary_hold_* in the * 0 - 0x100 address range
*/ #define LOW_ADDR(x) (((unsignedlong) &(x)) & 0xff)
/* * On pseries, if RTAS supports "query-cpu-stopped-state", * we skip this stage, the CPUs will be started by the * kernel using RTAS.
*/ if ((of_platform == PLATFORM_PSERIES ||
of_platform == PLATFORM_PSERIES_LPAR) &&
rtas_has_query_cpu_stopped) {
prom_printf("prom_hold_cpus: skipped\n"); return;
}
/* Set the common spinloop variable, so all of the secondary cpus * will block when they are awakened from their OF spinloop. * This must occur for both SMP and non SMP kernels, since OF will * be trashed when we move the kernel.
*/
*spinloop = 0;
/* look for cpus */ for (node = 0; prom_next_node(&node); ) { unsignedint cpu_no;
__be32 reg;
/* Init the acknowledge var which will be reset by * the secondary cpu when it awakens from its OF * spinloop.
*/
*acknowledge = (unsignedlong)-1;
if (cpu_no != prom.cpu) { /* Primary Thread of non-boot cpu or any thread */
prom_printf("starting cpu hw idx %u... ", cpu_no);
call_prom("start-cpu", 3, 0, node,
secondary_hold, cpu_no);
for (i = 0; (i < 100000000) &&
(*acknowledge == ((unsignedlong)-1)); i++ )
mb();
/*
 * Stash the OF client-interface entry point and look up the two
 * device-tree handles (/chosen and the root node) that nearly every
 * later firmware call depends on.  Panics if either node is missing.
 */
static void __init prom_init_client_services(unsigned long pp)
{
	/* Get a handle to the prom entry point before anything else */
	prom_entry = pp;

	/* find /chosen, which holds stdout, bootargs, etc. */
	prom.chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
	if (!PHANDLE_VALID(prom.chosen))
		prom_panic("cannot find chosen"); /* msg won't be printed :( */

	/* get device tree root */
	prom.root = call_prom("finddevice", 1, 1, ADDR("/"));
	if (!PHANDLE_VALID(prom.root))
		prom_panic("cannot find device tree root"); /* msg won't be printed :( */

	/* no MMU ihandle yet; discovered later if/where needed */
	prom.mmumap = 0;
}
#ifdef CONFIG_PPC32 /* * For really old powermacs, we need to map things we claim. * For that, we need the ihandle of the mmu. * Also, on the longtrail, we need to work around other bugs.
*/ staticvoid __init prom_find_mmu(void)
{
phandle oprom; char version[64];
if (prom_getprop(prom.chosen, "stdout", &val, sizeof(val)) <= 0)
prom_panic("cannot find stdout");
prom.stdout = be32_to_cpu(val);
/* Get the full OF pathname of the stdout device */
memset(path, 0, 256);
call_prom("instance-to-path", 3, 1, prom.stdout, path, 255);
prom_printf("OF stdout device is: %s\n", of_stdout_device);
prom_setprop(prom.chosen, "/chosen", "linux,stdout-path",
path, prom_strlen(path) + 1);
/* instance-to-package fails on PA-Semi */
stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); if (stdout_node != PROM_ERROR) {
val = cpu_to_be32(stdout_node);
/* If it's a display, note it */
memset(type, 0, sizeof(type));
prom_getprop(stdout_node, "device_type", type, sizeof(type)); if (prom_strcmp(type, "display") == 0)
prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0);
}
}
/*
 * Classify the machine we are booting on as one of the PLATFORM_*
 * codes, by inspecting the root node's "compatible" list first and
 * then (ppc64 only) its "device_type" plus the presence of /rtas.
 */
static int __init prom_find_machine_type(void)
{
	static char compat[256] __prombss;
	int len, i = 0;
#ifdef CONFIG_PPC64
	phandle rtas;
	int x;
#endif

	/* Look for a PowerMac or a Cell */
	len = prom_getprop(prom.root, "compatible",
			   compat, sizeof(compat)-1);
	if (len > 0) {
		compat[len] = 0;
		/* "compatible" is a list of nul-separated strings; walk it */
		while (i < len) {
			char *p = &compat[i];
			int sl = prom_strlen(p);
			if (sl == 0)
				break;
			if (prom_strstr(p, "Power Macintosh") ||
			    prom_strstr(p, "MacRISC"))
				return PLATFORM_POWERMAC;
#ifdef CONFIG_PPC64
			/* We must make sure we don't detect the IBM Cell
			 * blades as pSeries due to some firmware issues,
			 * so we do it here.
			 */
			if (prom_strstr(p, "IBM,CBEA") ||
			    prom_strstr(p, "IBM,CPBW-1.0"))
				return PLATFORM_GENERIC;
#endif /* CONFIG_PPC64 */
			i += sl + 1;
		}
	}
#ifdef CONFIG_PPC64
	/* Try to figure out if it's an IBM pSeries or any other
	 * PAPR compliant platform. We assume it is if :
	 *  - /device_type is "chrp" (please, do NOT use that for future
	 *    non-IBM designs !
	 *  - it has /rtas
	 */
	len = prom_getprop(prom.root, "device_type",
			   compat, sizeof(compat)-1);
	if (len <= 0)
		return PLATFORM_GENERIC;
	if (prom_strcmp(compat, "chrp"))
		return PLATFORM_GENERIC;

	/* Default to pSeries. We need to know if we are running LPAR */
	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
	if (!PHANDLE_VALID(rtas))
		return PLATFORM_GENERIC;
	/* hypervisor function list present => running under an LPAR */
	x = prom_getproplen(rtas, "ibm,hypertas-functions");
	if (x != PROM_ERROR) {
		prom_debug("Hypertas detected, assuming LPAR !\n");
		return PLATFORM_PSERIES_LPAR;
	}
	return PLATFORM_PSERIES;
#else
	return PLATFORM_GENERIC;
#endif
}
/*
 * Set palette entry @i of display @ih through the OF "color!" method.
 * Note the component order is deliberately reversed in the call
 * (b, g, r) relative to the parameter order (r, g, b).
 */
static int __init prom_set_color(ihandle ih, int i, int r, int g, int b)
{
	int rc;

	rc = call_prom("call-method", 6, 1, ADDR("color!"), ih, i, b, g, r);
	return rc;
}
/* * If we have a display that we don't know how to drive, * we will want to try to execute OF's open method for it * later. However, OF will probably fall over if we do that * we've taken over the MMU. * So we check whether we will need to open the display, * and if so, open it now.
*/ staticvoid __init prom_check_displays(void)
{ char type[16], *path;
phandle node;
ihandle ih; int i;
prom_debug("Looking for displays\n"); for (node = 0; prom_next_node(&node); ) {
memset(type, 0, sizeof(type));
prom_getprop(node, "device_type", type, sizeof(type)); if (prom_strcmp(type, "display") != 0) continue;
/* It seems OF doesn't null-terminate the path :-( */
path = prom_scratch;
memset(path, 0, sizeof(prom_scratch));
/* * leave some room at the end of the path for appending extra * arguments
*/ if (call_prom("package-to-path", 3, 1, node, path, sizeof(prom_scratch) - 10) == PROM_ERROR) continue;
prom_printf("found display : %s, opening... ", path);
ih = call_prom("open", 1, 1, path); if (ih == 0) {
prom_printf("failed\n"); continue;
}
/* Setup a usable color table when the appropriate
* method is available. Should update this to set-colors */
clut = default_colors; for (i = 0; i < 16; i++, clut += 3) if (prom_set_color(ih, i, clut[0], clut[1],
clut[2]) != 0) break;
#ifdef CONFIG_LOGO_LINUX_CLUT224
clut = PTRRELOC(logo_linux_clut224.clut); for (i = 0; i < logo_linux_clut224.clutsize; i++, clut += 3) if (prom_set_color(ih, i + 32, clut[0], clut[1],
clut[2]) != 0) break; #endif/* CONFIG_LOGO_LINUX_CLUT224 */
s = os = (char *)dt_string_start;
s += 4; while (s < (char *)dt_string_end) { if (prom_strcmp(s, str) == 0) return s - os;
s += prom_strlen(s) + 1;
} return 0;
}
/* * The Open Firmware 1275 specification states properties must be 31 bytes or * less, however not all firmwares obey this. Make it 64 bytes to be safe.
*/ #define MAX_PROPERTY_NAME 64
/* get and store all property names */
prev_name = ""; for (;;) { /* 64 is max len of name including nul. */
namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1); if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) { /* No more nodes: unwind alloc */
*mem_start = (unsignedlong)namep; break;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.