// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for interfacing to the RTAS on CHRP machines.
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 */
/*
 * struct rtas_filter - Argument constraints for RTAS functions that may
 * be invoked via the rtas syscall. Indexes into the args buffer, -1 if
 * not used.
 */
struct rtas_filter {
	const int buf_idx1;	/* index of first buffer-address argument */
	const int size_idx1;	/* index of first buffer-size argument */
	const int buf_idx2;	/* index of second buffer-address argument */
	const int size_idx2;	/* index of second buffer-size argument */
	/*
	 * Assumed buffer size per the spec if the function does not
	 * have a size parameter, e.g. ibm,errinjct. 0 if unused.
	 */
	const int fixed_size;
};
/** * struct rtas_function - Descriptor for RTAS functions. * * @token: Value of @name if it exists under the /rtas node. * @name: Function name. * @filter: If non-NULL, invoking this function via the rtas syscall is * generally allowed, and @filter describes constraints on the * arguments. See also @banned_for_syscall_on_le. * @banned_for_syscall_on_le: Set when call via sys_rtas is generally allowed * but specifically restricted on ppc64le. Such * functions are believed to have no users on * ppc64le, and we want to keep it that way. It does * not make sense for this to be set when @filter * is NULL. * @lock: Pointer to an optional dedicated per-function mutex. This * should be set for functions that require multiple calls in * sequence to complete a single operation, and such sequences * will disrupt each other if allowed to interleave. Users of * this function are required to hold the associated lock for * the duration of the call sequence. Add an explanatory * comment to the function table entry if setting this member.
*/ struct rtas_function {
s32 token; constbool banned_for_syscall_on_le:1; constchar * const name; conststruct rtas_filter *filter; struct mutex *lock;
};
/* * Nearly all RTAS calls need to be serialized. All uses of the * default rtas_args block must hold rtas_lock. * * Exceptions to the RTAS serialization requirement (e.g. stop-self) * must use a separate rtas_args structure.
*/ static DEFINE_RAW_SPINLOCK(rtas_lock); staticstruct rtas_args rtas_args;
/** * rtas_function_token() - RTAS function token lookup. * @handle: Function handle, e.g. RTAS_FN_EVENT_SCAN. * * Context: Any context. * Return: the token value for the function if implemented by this platform, * otherwise RTAS_UNKNOWN_SERVICE.
*/
s32 rtas_function_token(const rtas_fn_handle_t handle)
{ const size_t index = handle.index; constbool out_of_bounds = index >= ARRAY_SIZE(rtas_function_table);
if (WARN_ONCE(out_of_bounds, "invalid function index %zu", index)) return RTAS_UNKNOWN_SERVICE; /* * Various drivers attempt token lookups on non-RTAS * platforms.
*/ if (!rtas.dev) return RTAS_UNKNOWN_SERVICE;
/* * Boot-time initialization of the function table needs the lookup to * return a non-const-qualified object. Use rtas_name_to_function() * in all other contexts.
*/ staticstruct rtas_function *__rtas_name_to_function(constchar *name)
{ conststruct rtas_function key = {
.name = name,
}; struct rtas_function *found;
found = bsearch(&key, rtas_function_table, ARRAY_SIZE(rtas_function_table), sizeof(rtas_function_table[0]), rtas_function_cmp);
/* * For use by sys_rtas(), where the token value is provided by user * space and we don't want to warn on failed lookups.
*/ staticconststruct rtas_function *rtas_token_to_function_untrusted(s32 token)
{ return xa_load(&rtas_token_to_function_xarray, token);
}
/* * Reverse lookup for deriving the function descriptor from a * known-good token value in contexts where the former is not already * available. @token must be valid, e.g. derived from the result of a * prior lookup against the function table.
*/ staticconststruct rtas_function *rtas_token_to_function(s32 token)
{ conststruct rtas_function *func;
if (WARN_ONCE(token < 0, "invalid token %d", token)) return NULL;
func = rtas_token_to_function_untrusted(token); if (func) return func; /* * Fall back to linear scan in case the reverse mapping hasn't * been initialized yet.
*/ if (xa_empty(&rtas_token_to_function_xarray)) {
for_each_rtas_function(func) { if (func->token == token) return func;
}
}
staticvoid do_enter_rtas(struct rtas_args *args)
{ constunsignedlong msr = mfmsr(); /* * Situations where we want to skip any active tracepoints for * safety reasons: * * 1. The last code executed on an offline CPU as it stops, * i.e. we're about to call stop-self. The tracepoints' * function name lookup uses xarray, which uses RCU, which * isn't valid to call on an offline CPU. Any events * emitted on an offline CPU will be discarded anyway. * * 2. In real mode, as when invoking ibm,nmi-interlock from * the pseries MCE handler. We cannot count on trace * buffers or the entries in rtas_token_to_function_xarray * to be contained in the RMO.
*/ constunsignedlong mask = MSR_IR | MSR_DR; constbool can_trace = likely(cpu_online(raw_smp_processor_id()) &&
(msr & mask) == mask); /* * Make sure MSR[RI] is currently enabled as it will be forced later * in enter_rtas.
*/
BUG_ON(!(msr & MSR_RI));
BUG_ON(!irqs_disabled());
hard_irq_disable(); /* Ensure MSR[EE] is disabled on PPC64 */
if (can_trace)
__do_enter_rtas_trace(args); else
__do_enter_rtas(args);
}
/*
 * If non-NULL, this gets called when the kernel terminates.
 * This is done like this so rtas_flash can be a module.
 */
void (*rtas_flash_term_hook)(int);
EXPORT_SYMBOL_GPL(rtas_flash_term_hook);
/* * call_rtas_display_status and call_rtas_display_status_delay * are designed only for very early low-level debugging, which * is why the token is hard-coded to 10.
*/ staticvoid call_rtas_display_status(unsignedchar c)
{ unsignedlong flags;
/*
 * Early-debug helper: emit @c to the RTAS display with pacing delays,
 * padding each 16-character line and deferring newline handling.
 */
static void call_rtas_display_status_delay(char c)
{
	static int pending_newline = 0; /* did last write end with unprinted newline? */
	static int width = 16;

	if (c == '\n') {
		/* Pad out the rest of the line before starting a new one. */
		while (width-- > 0)
			call_rtas_display_status(' ');
		width = 16;
		mdelay(500);
		pending_newline = 1;
	} else {
		if (pending_newline) {
			call_rtas_display_status('\r');
			call_rtas_display_status('\n');
		}
		pending_newline = 0;
		if (width--) {
			call_rtas_display_status(c);
			udelay(10000);
		}
	}
}
if (display_character == RTAS_UNKNOWN_SERVICE) { /* use hex display if available */ if (set_indicator != RTAS_UNKNOWN_SERVICE)
rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex); return;
}
spin_lock(&progress_lock);
/* * Last write ended with newline, but we didn't print it since * it would just clear the bottom line of output. Print it now * instead. * * If no newline is pending and form feed is supported, clear the * display with a form feed; otherwise, print a CR to start output * at the beginning of the line.
*/ if (pending_newline) {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
pending_newline = 0;
} else {
current_line = 0; if (form_feed)
rtas_call(display_character, 1, 1, NULL,
(char)form_feed); else
rtas_call(display_character, 1, 1, NULL, '\r');
}
if (row_width)
width = row_width[current_line]; else
width = display_width;
os = s; while (*os) { if (*os == '\n' || *os == '\r') { /* If newline is the last character, save it * until next call to avoid bumping up the * display output.
*/ if (*os == '\n' && !os[1]) {
pending_newline = 1;
current_line++; if (current_line > display_lines-1)
current_line = display_lines-1;
spin_unlock(&progress_lock); return;
}
/* RTAS wants CR-LF, not just LF */
if (*os == '\n') {
rtas_call(display_character, 1, 1, NULL, '\r');
rtas_call(display_character, 1, 1, NULL, '\n');
} else { /* CR might be used to re-draw a line, so we'll * leave it alone and not add LF.
*/
rtas_call(display_character, 1, 1, NULL, *os);
}
/* if we overwrite the screen length */ if (width <= 0) while ((*os != 0) && (*os != '\n') && (*os != '\r'))
os++;
}
spin_unlock(&progress_lock);
}
EXPORT_SYMBOL_GPL(rtas_progress); /* needed by rtas_flash module */
int rtas_token(constchar *service)
{ conststruct rtas_function *func; const __be32 *tokp;
if (rtas.dev == NULL) return RTAS_UNKNOWN_SERVICE;
func = rtas_name_to_function(service); if (func) return func->token; /* * The caller is looking up a name that is not known to be an * RTAS function. Either it's a function that needs to be * added to the table, or they're misusing rtas_token() to * access non-function properties of the /rtas node. Warn and * fall back to the legacy behavior.
*/
WARN_ONCE(1, "unknown function `%s`, should it be added to rtas_function_table?\n",
service);
/*
 * Return the firmware-specified size of the error log buffer
 * for all rtas calls that require an error buffer argument.
 * This includes 'check-exception' and 'rtas-last-error'.
 */
int rtas_get_error_log_max(void)
{
	return rtas_error_log_max;
}
if (of_property_read_u32(rtas.dev, propname, &max)) {
pr_warn("%s not found, using default of %u\n",
propname, RTAS_ERROR_LOG_MAX);
max = RTAS_ERROR_LOG_MAX;
}
if (max > RTAS_ERROR_LOG_MAX) {
pr_warn("%s = %u, clamping max error log size to %u\n",
propname, max, RTAS_ERROR_LOG_MAX);
max = RTAS_ERROR_LOG_MAX;
}
rtas_error_log_max = max;
}
/* Fallback destination for __fetch_rtas_last_error() when no altbuf is given. */
static char rtas_err_buf[RTAS_ERROR_LOG_MAX];
/** Return a copy of the detailed error text associated with the * most recent failed call to rtas. Because the error text * might go stale if there are any other intervening rtas calls, * this routine must be called atomically with whatever produced * the error (i.e. with rtas_lock still held from the previous call).
*/ staticchar *__fetch_rtas_last_error(char *altbuf)
{ const s32 token = rtas_function_token(RTAS_FN_RTAS_LAST_ERROR); struct rtas_args err_args, save_args;
u32 bufsz; char *buf = NULL;
/* Log the error in the unlikely case that there was one. */ if (unlikely(err_args.args[2] == 0)) { if (altbuf) {
buf = altbuf;
} else {
buf = rtas_err_buf; if (slab_is_available())
buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
} if (buf)
memmove(buf, rtas_err_buf, RTAS_ERROR_LOG_MAX);
}
for (i = 0; i < nargs; ++i)
args->args[i] = cpu_to_be32(va_arg(list, __u32));
for (i = 0; i < nret; ++i)
args->rets[i] = 0;
do_enter_rtas(args);
}
/** * rtas_call_unlocked() - Invoke an RTAS firmware function without synchronization. * @args: RTAS parameter block to be used for the call, must obey RTAS addressing * constraints. * @token: Identifies the function being invoked. * @nargs: Number of input parameters. Does not include token. * @nret: Number of output parameters, including the call status. * @....: List of @nargs input parameters. * * Invokes the RTAS function indicated by @token, which the caller * should obtain via rtas_function_token(). * * This function is similar to rtas_call(), but must be used with a * limited set of RTAS calls specifically exempted from the general * requirement that only one RTAS call may be in progress at any * time. Examples include stop-self and ibm,nmi-interlock.
*/ void rtas_call_unlocked(struct rtas_args *args, int token, int nargs, int nret, ...)
{
va_list list;
/** * rtas_call() - Invoke an RTAS firmware function. * @token: Identifies the function being invoked. * @nargs: Number of input parameters. Does not include token. * @nret: Number of output parameters, including the call status. * @outputs: Array of @nret output words. * @....: List of @nargs input parameters. * * Invokes the RTAS function indicated by @token, which the caller * should obtain via rtas_function_token(). * * The @nargs and @nret arguments must match the number of input and * output parameters specified for the RTAS function. * * rtas_call() returns RTAS status codes, not conventional Linux errno * values. Callers must translate any failure to an appropriate errno * in syscall context. Most callers of RTAS functions that can return * -2 or 990x should use rtas_busy_delay() to correctly handle those * statuses before calling again. * * The return value descriptions are adapted from 7.2.8 [RTAS] Return * Codes of the PAPR and CHRP specifications. * * Context: Process context preferably, interrupt context if * necessary. Acquires an internal spinlock and may perform * GFP_ATOMIC slab allocation in error path. Unsafe for NMI * context. * Return: * * 0 - RTAS function call succeeded. * * -1 - RTAS function encountered a hardware or * platform error, or the token is invalid, * or the function is restricted by kernel policy. * * -2 - Specs say "A necessary hardware device was busy, * and the requested function could not be * performed. The operation should be retried at * a later time." This is misleading, at least with * respect to current RTAS implementations. What it * usually means in practice is that the function * could not be completed while meeting RTAS's * deadline for returning control to the OS (250us * for PAPR/PowerVM, typically), but the call may be * immediately reattempted to resume work on it. * * -3 - Parameter error. * * -7 - Unexpected state change. * * 9000...9899 - Vendor-specific success codes. 
* * 9900...9905 - Advisory extended delay. Caller should try * again after ~10^x ms has elapsed, where x is * the last digit of the status [0-5]. Again going * beyond the PAPR text, 990x on PowerVM indicates * contention for RTAS-internal resources. Other * RTAS call sequences in progress should be * allowed to complete before reattempting the * call. * * -9000 - Multi-level isolation error. * * -9999...-9004 - Vendor-specific error codes. * * Additional negative values - Function-specific error. * * Additional positive values - Function-specific success.
*/ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
{ struct pin_cookie cookie;
va_list list; int i; unsignedlong flags; struct rtas_args *args; char *buff_copy = NULL; int ret;
if (!rtas.entry || token == RTAS_UNKNOWN_SERVICE) return -1;
if (token_is_restricted_errinjct(token)) { /* * It would be nicer to not discard the error value * from security_locked_down(), but callers expect an * RTAS status, not an errno.
*/ if (security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION)) return -1;
}
/* A -1 return code indicates that the last command couldn't
be completed due to a hardware error. */ if (be32_to_cpu(args->rets[0]) == -1)
buff_copy = __fetch_rtas_last_error(NULL);
if (nret > 1 && outputs != NULL) for (i = 0; i < nret-1; ++i)
outputs[i] = be32_to_cpu(args->rets[i + 1]);
ret = (nret > 0) ? be32_to_cpu(args->rets[0]) : 0;
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0); if (slab_is_available())
kfree(buff_copy);
} return ret;
}
EXPORT_SYMBOL_GPL(rtas_call);
/**
 * rtas_busy_delay_time() - From an RTAS status value, calculate the
 *                          suggested delay time in milliseconds.
 *
 * @status: a value returned from rtas_call() or similar APIs which return
 *          the status of a RTAS function call.
 *
 * Context: Any context.
 *
 * Return:
 * * 100000 - If @status is 9905.
 * * 10000  - If @status is 9904.
 * * 1000   - If @status is 9903.
 * * 100    - If @status is 9902.
 * * 10     - If @status is 9901.
 * * 1      - If @status is either 9900 or -2. This is "wrong" for -2, but
 *            some callers depend on this behavior, and the worst outcome
 *            is that they will delay for longer than necessary.
 * * 0      - If @status is not a busy or extended delay value.
 */
unsigned int rtas_busy_delay_time(int status)
{
	int order;
	unsigned int ms = 0;

	if (status == RTAS_BUSY) {
		ms = 1;
	} else if (status >= RTAS_EXTENDED_DELAY_MIN &&
		   status <= RTAS_EXTENDED_DELAY_MAX) {
		/* Extended delay hint 990x maps to 10^x milliseconds. */
		order = status - RTAS_EXTENDED_DELAY_MIN;
		for (ms = 1; order > 0; order--)
			ms *= 10;
	}

	return ms;
}
/* * Early boot fallback for rtas_busy_delay().
*/ staticbool __init rtas_busy_delay_early(int status)
{ static size_t successive_ext_delays __initdata; bool retry;
switch (status) { case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX: /* * In the unlikely case that we receive an extended * delay status in early boot, the OS is probably not * the cause, and there's nothing we can do to clear * the condition. Best we can do is delay for a bit * and hope it's transient. Lie to the caller if it * seems like we're stuck in a retry loop.
*/
mdelay(1);
retry = true;
successive_ext_delays += 1; if (successive_ext_delays > 1000) {
pr_err("too many extended delays, giving up\n");
dump_stack();
retry = false;
successive_ext_delays = 0;
} break; case RTAS_BUSY:
retry = true;
successive_ext_delays = 0; break; default:
retry = false;
successive_ext_delays = 0; break;
}
return retry;
}
/**
 * rtas_busy_delay() - helper for RTAS busy and extended delay statuses
 *
 * @status: a value returned from rtas_call() or similar APIs which return
 *          the status of a RTAS function call.
 *
 * Context: Process context. May sleep or schedule.
 *
 * Return:
 * * true  - @status is RTAS_BUSY or an extended delay hint. The
 *           caller may assume that the CPU has been yielded if necessary,
 *           and that an appropriate delay for @status has elapsed.
 *           Generally the caller should reattempt the RTAS call which
 *           yielded @status.
 *
 * * false - @status is not @RTAS_BUSY nor an extended delay hint. The
 *           caller is responsible for handling @status.
 */
bool __ref rtas_busy_delay(int status)
{
	unsigned int ms;
	bool ret;

	/*
	 * Can't do timed sleeps before timekeeping is up.
	 */
	if (system_state < SYSTEM_SCHEDULING)
		return rtas_busy_delay_early(status);

	switch (status) {
	case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
		ret = true;
		ms = rtas_busy_delay_time(status);
		/*
		 * The extended delay hint can be as high as 100 seconds.
		 * Surely any function returning such a status is either
		 * buggy or isn't going to be significantly slowed by us
		 * polling at 1HZ. Clamp the sleep time to one second.
		 */
		ms = clamp(ms, 1U, 1000U);
		/*
		 * The delay hint is an order-of-magnitude suggestion, not a
		 * minimum. It is fine, possibly even advantageous, for us to
		 * pause for less time than hinted. To make sure pause time will
		 * not be way longer than requested independent of HZ
		 * configuration, use fsleep(). See fsleep() for details of
		 * used sleeping functions.
		 */
		fsleep(ms * 1000);
		break;
	case RTAS_BUSY:
		ret = true;
		/*
		 * We should call again immediately if there's no other
		 * work to do.
		 */
		cond_resched();
		break;
	default:
		ret = false;
		/*
		 * Not a busy or extended delay status; the caller should
		 * handle @status itself. Ensure we warn on misuses in
		 * atomic context regardless.
		 */
		might_sleep();
		break;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(rtas_busy_delay);
int rtas_error_rc(int rtas_rc)
{ int rc;
switch (rtas_rc) { case RTAS_HARDWARE_ERROR: /* Hardware Error */
rc = -EIO; break; case RTAS_INVALID_PARAMETER: /* Bad indicator/domain/etc */
rc = -EINVAL; break; case -9000: /* Isolation error */
rc = -EFAULT; break; case -9001: /* Outstanding TCE/PTE */
rc = -EEXIST; break; case -9002: /* No usable slot */
rc = -ENODEV; break; default:
pr_err("%s: unexpected error %d\n", __func__, rtas_rc);
rc = -ERANGE; break;
} return rc;
}
EXPORT_SYMBOL_GPL(rtas_error_rc);
int rtas_get_power_level(int powerdomain, int *level)
{ int token = rtas_function_token(RTAS_FN_GET_POWER_LEVEL); int rc;
if (token == RTAS_UNKNOWN_SERVICE) return -ENOENT;
if (rc < 0) return rtas_error_rc(rc); return rc;
}
bool rtas_indicator_present(int token, int *maxindex)
{ int proplen, count, i; conststruct indicator_elem {
__be32 token;
__be32 maxindex;
} *indicators;
indicators = of_get_property(rtas.dev, "rtas-indicators", &proplen); if (!indicators) returnfalse;
count = proplen / sizeof(struct indicator_elem);
for (i = 0; i < count; i++) { if (__be32_to_cpu(indicators[i].token) != token) continue; if (maxindex)
*maxindex = __be32_to_cpu(indicators[i].maxindex); returntrue;
}
returnfalse;
}
/*
 * Invoke set-indicator, retrying for as long as firmware reports a
 * busy or extended-delay status. Returns a negative errno on failure,
 * otherwise the final RTAS status.
 */
int rtas_set_indicator(int indicator, int index, int new_value)
{
	const int token = rtas_function_token(RTAS_FN_SET_INDICATOR);
	int rc;

	if (token == RTAS_UNKNOWN_SERVICE)
		return -ENOENT;

	do {
		rc = rtas_call(token, 3, 1, NULL, indicator, index, new_value);
	} while (rtas_busy_delay(rc));

	return rc < 0 ? rtas_error_rc(rc) : rc;
}
EXPORT_SYMBOL_GPL(rtas_set_indicator);
/* * Ignoring RTAS extended delay
*/ int rtas_set_indicator_fast(int indicator, int index, int new_value)
{ int token = rtas_function_token(RTAS_FN_SET_INDICATOR); int rc;
if (token == RTAS_UNKNOWN_SERVICE) return -ENOENT;
/**
 * rtas_ibm_suspend_me() - Call ibm,suspend-me to suspend the LPAR.
 *
 * @fw_status: RTAS call status will be placed here if not NULL.
 *
 * rtas_ibm_suspend_me() should be called only on a CPU which has
 * received H_CONTINUE from the H_JOIN hcall. All other active CPUs
 * should be waiting to return from H_JOIN.
 *
 * rtas_ibm_suspend_me() may suspend execution of the OS
 * indefinitely. Callers should take appropriate measures upon return, such as
 * resetting watchdog facilities.
 *
 * Callers may choose to retry this call if @fw_status is
 * %RTAS_THREADS_ACTIVE.
 *
 * Return:
 * 0          - The partition has resumed from suspend, possibly after
 *              migration to a different host.
 * -ECANCELED - The operation was aborted.
 * -EAGAIN    - There were other CPUs not in H_JOIN at the time of the call.
 * -EBUSY     - Some other condition prevented the suspend from succeeding.
 * -EIO       - Hardware/platform error.
 */
int rtas_ibm_suspend_me(int *fw_status)
{
	const int token = rtas_function_token(RTAS_FN_IBM_SUSPEND_ME);
	int fwrc;
	int ret;

	fwrc = rtas_call(token, 0, 1, NULL);

	/* Report the raw firmware status before translating it. */
	if (fw_status)
		*fw_status = fwrc;

	switch (fwrc) {
	case 0:
		ret = 0;
		break;
	case RTAS_SUSPEND_ABORTED:
		ret = -ECANCELED;
		break;
	case RTAS_THREADS_ACTIVE:
		ret = -EAGAIN;
		break;
	case RTAS_NOT_SUSPENDABLE:
	case RTAS_OUTSTANDING_COPROC:
		ret = -EBUSY;
		break;
	case -1:
	default:
		ret = -EIO;
		break;
	}

	return ret;
}
/* Reboot via the system-reboot RTAS call; spins forever if it returns. */
void __noreturn rtas_restart(char *cmd)
{
	/* Give rtas_flash a chance to run its termination hook first. */
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_RESTART);

	pr_emerg("system-reboot returned %d\n",
		 rtas_call(rtas_function_token(RTAS_FN_SYSTEM_REBOOT), 0, 1, NULL));

	for (;;)
		;
}
/* Power down via the power-off RTAS call; spins forever if it returns. */
void rtas_power_off(void)
{
	/* Give rtas_flash a chance to run its termination hook first. */
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_POWER_OFF);

	/* allow power on only with power button press */
	pr_emerg("power-off returned %d\n",
		 rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1));

	for (;;)
		;
}
/* Halt is implemented with the power-off RTAS call; never returns. */
void __noreturn rtas_halt(void)
{
	/* Give rtas_flash a chance to run its termination hook first. */
	if (rtas_flash_term_hook)
		rtas_flash_term_hook(SYS_HALT);

	/* allow power on only with power button press */
	pr_emerg("power-off returned %d\n",
		 rtas_call(rtas_function_token(RTAS_FN_POWER_OFF), 2, 1, NULL, -1, -1));

	for (;;)
		;
}
/* Must be in the RMO region, so we place it here */ staticchar rtas_os_term_buf[2048]; staticbool ibm_extended_os_term;
/* * Firmware with the ibm,extended-os-term property is guaranteed * to always return from an ibm,os-term call. Earlier versions without * this property may terminate the partition which we want to avoid * since it interferes with panic_timeout.
*/
if (token == RTAS_UNKNOWN_SERVICE || !ibm_extended_os_term) return;
/* * Keep calling as long as RTAS returns a "try again" status, * but don't use rtas_busy_delay(), which potentially * schedules.
*/ do {
rtas_call_unlocked(&args, token, 1, 1, NULL, __pa(rtas_os_term_buf));
status = be32_to_cpu(args.rets[0]);
} while (rtas_busy_delay_time(status));
if (status != 0)
pr_emerg("ibm,os-term call failed %d\n", status);
}
/**
 * rtas_activate_firmware() - Activate a new version of firmware.
 *
 * Context: This function may sleep.
 *
 * Activate a new version of partition firmware. The OS must call this
 * after resuming from a partition hibernation or migration in order
 * to maintain the ability to perform live firmware updates. It's not
 * catastrophic for this method to be absent or to fail; just log the
 * condition in that case.
 */
void rtas_activate_firmware(void)
{
	const int token = rtas_function_token(RTAS_FN_IBM_ACTIVATE_FIRMWARE);
	int fwrc;

	if (token == RTAS_UNKNOWN_SERVICE) {
		pr_notice("ibm,activate-firmware method unavailable\n");
		return;
	}

	mutex_lock(&rtas_ibm_activate_firmware_lock);

	/* Retry while firmware reports busy/extended-delay statuses. */
	do {
		fwrc = rtas_call(token, 0, 1, NULL);
	} while (rtas_busy_delay(fwrc));

	mutex_unlock(&rtas_ibm_activate_firmware_lock);

	if (fwrc)
		pr_err("ibm,activate-firmware failed (%i)\n", fwrc);
}
/**
 * get_pseries_errorlog() - Find a specific pseries error log in an RTAS
 *                          extended event log.
 * @log: RTAS error/event log
 * @section_id: two character section identifier
 *
 * Return: A pointer to the specified errorlog or NULL if not found.
 */
noinstr struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
						      uint16_t section_id)
{
	struct rtas_ext_event_log_v6 *ext_log =
		(struct rtas_ext_event_log_v6 *)log->buffer;
	struct pseries_errorlog *sect;
	unsigned char *p, *log_end;
	uint32_t ext_log_length = rtas_error_extended_log_length(log);
	uint8_t log_format = rtas_ext_event_log_format(ext_log);
	uint32_t company_id = rtas_ext_event_company_id(ext_log);

	/* Check that we understand the format */
	if (ext_log_length < sizeof(struct rtas_ext_event_log_v6) ||
	    log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
	    company_id != RTAS_V6EXT_COMPANY_ID_IBM)
		return NULL;

	log_end = log->buffer + ext_log_length;
	p = ext_log->vendor_log;

	/* Walk the variable-length sections until @section_id matches. */
	while (p < log_end) {
		sect = (struct pseries_errorlog *)p;
		if (pseries_errorlog_id(sect) == section_id)
			return sect;
		p += pseries_errorlog_length(sect);
	}

	return NULL;
}
/* * The sys_rtas syscall, as originally designed, allows root to pass * arbitrary physical addresses to RTAS calls. A number of RTAS calls * can be abused to write to arbitrary memory and do other things that * are potentially harmful to system integrity, and thus should only * be used inside the kernel and not exposed to userspace. * * All known legitimate users of the sys_rtas syscall will only ever * pass addresses that fall within the RMO buffer, and use a known * subset of RTAS calls. * * Accordingly, we filter RTAS requests to check that the call is * permitted, and that provided pointers fall within the RMO buffer. * If a function is allowed to be invoked via the syscall, then its * entry in the rtas_functions table points to a rtas_filter that * describes its constraints, with the indexes of the parameters which * are expected to contain addresses and sizes of buffers allocated * inside the RMO buffer.
*/
staticbool in_rmo_buf(u32 base, u32 end)
{ return base >= rtas_rmo_buf &&
base < (rtas_rmo_buf + RTAS_USER_REGION_SIZE) &&
base <= end &&
end >= rtas_rmo_buf &&
end < (rtas_rmo_buf + RTAS_USER_REGION_SIZE);
}
/* * Only functions with filters attached are allowed.
*/
f = func->filter; if (!f) goto err; /* * And some functions aren't allowed on LE.
*/ if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN) && func->banned_for_syscall_on_le) goto err;
if (f->buf_idx1 != -1) {
base = be32_to_cpu(args->args[f->buf_idx1]); if (f->size_idx1 != -1)
size = be32_to_cpu(args->args[f->size_idx1]); elseif (f->fixed_size)
size = f->fixed_size; else
size = 1;
end = base + size - 1;
/* * Special case for ibm,platform-dump - NULL buffer * address is used to indicate end of dump processing
*/ if (is_platform_dump && base == 0) returnfalse;
if (!in_rmo_buf(base, end)) goto err;
}
if (f->buf_idx2 != -1) {
base = be32_to_cpu(args->args[f->buf_idx2]); if (f->size_idx2 != -1)
size = be32_to_cpu(args->args[f->size_idx2]); elseif (f->fixed_size)
size = f->fixed_size; else
size = 1;
end = base + size - 1;
/* * Special case for ibm,configure-connector where the * address can be 0
*/ if (is_config_conn && base == 0) returnfalse;
/* Copy in args. */ if (copy_from_user(args.args, uargs->args,
nargs * sizeof(rtas_arg_t)) != 0) return -EFAULT;
/* * If this token doesn't correspond to a function the kernel * understands, you're not allowed to call it.
*/
func = rtas_token_to_function_untrusted(token); if (!func) return -EINVAL;
if (block_rtas_call(func, nargs, &args)) return -EINVAL;
if (token_is_restricted_errinjct(token)) { int err;
err = security_locked_down(LOCKDOWN_RTAS_ERROR_INJECTION); if (err) return err;
}
/* Need to handle ibm,suspend_me call specially */ if (token == rtas_function_token(RTAS_FN_IBM_SUSPEND_ME)) {
/* * rtas_ibm_suspend_me assumes the streamid handle is in cpu * endian, or at least the hcall within it requires it.
*/ int rc = 0;
u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
| be32_to_cpu(args.args[1]);
rc = rtas_syscall_dispatch_ibm_suspend_me(handle); if (rc == -EAGAIN)
args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE); elseif (rc == -EIO)
args.rets[0] = cpu_to_be32(-1); elseif (rc) return rc; goto copy_return;
}
buff_copy = get_errorlog_buffer();
/* * If this function has a mutex assigned to it, we must * acquire it to avoid interleaving with any kernel-based uses * of the same function. Kernel-based sequences acquire the * appropriate mutex explicitly.
*/ if (func->lock)
mutex_lock(func->lock);
/* A -1 return code indicates that the last command couldn't
be completed due to a hardware error. */ if (be32_to_cpu(args.rets[0]) == -1)
errbuf = __fetch_rtas_last_error(buff_copy);
for (size_t i = 0; i < ARRAY_SIZE(rtas_function_table); ++i) { struct rtas_function *curr = &rtas_function_table[i]; struct rtas_function *prior; int cmp;
curr->token = RTAS_UNKNOWN_SERVICE;
if (i == 0) continue; /* * Ensure table is sorted correctly for binary search * on function names.
*/
prior = &rtas_function_table[i - 1];
cmp = strcmp(prior->name, curr->name); if (cmp < 0) continue;
if (cmp == 0) {
pr_err("'%s' has duplicate function table entries\n",
curr->name);
} else {
pr_err("function table unsorted: '%s' wrongly precedes '%s'\n",
prior->name, curr->name);
}
}
pr_debug("function %s has token %u\n", func->name, func->token);
}
}
/*
 * Call early during boot, before mem init, to retrieve the RTAS
 * information from the device-tree and allocate the RMO buffer for userland
 * accesses.
 */
void __init rtas_initialize(void)
{
	unsigned long rtas_region = RTAS_INSTANTIATE_MAX;
	u32 base, size, entry;
	int no_base, no_size, no_entry;

	/* Get RTAS dev node and fill up our "rtas" structure with infos
	 * about it.
	 */
	rtas.dev = of_find_node_by_name(NULL, "rtas");
	if (!rtas.dev)
		return;

	/*
	 * NOTE(review): base/size/entry and the no_* flags are declared but
	 * never used here — the device-tree property reads that should
	 * populate them appear to have been dropped from this chunk. TODO:
	 * confirm against the upstream file.
	 */

	/* Must be called before any function token lookups */
	rtas_function_table_init();

	/*
	 * Discover this now to avoid a device tree lookup in the
	 * panic path.
	 */
	ibm_extended_os_term = of_property_read_bool(rtas.dev, "ibm,extended-os-term");

	/* If RTAS was found, allocate the RMO buffer for it and look for
	 * the stop-self token if any
	 */
#ifdef CONFIG_PPC64
	if (firmware_has_feature(FW_FEATURE_LPAR))
		rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX);
#endif

	rtas_rmo_buf = memblock_phys_alloc_range(RTAS_USER_REGION_SIZE, PAGE_SIZE,
						 0, rtas_region);
	if (!rtas_rmo_buf)
		/*
		 * NOTE(review): message reports PAGE_SIZE rather than the
		 * RTAS_USER_REGION_SIZE that failed to allocate — verify
		 * intent before changing.
		 */
		panic("ERROR: RTAS: Failed to allocate %lx bytes below %pa\n",
		      PAGE_SIZE, &rtas_region);

	rtas_work_area_reserve_arena(rtas_region);
}
int __init early_init_dt_scan_rtas(unsignedlong node, constchar *uname, int depth, void *data)
{ const u32 *basep, *entryp, *sizep;
#ifdef CONFIG_PPC64 /* need this feature to decide the crashkernel offset */ if (of_get_flat_dt_prop(node, "ibm,hypertas-functions", NULL))
powerpc_firmware_features |= FW_FEATURE_LPAR; #endif
/*
 * NOTE(review): extraction artifact — German website boilerplate that is
 * not part of this source file. English translation: "The information on
 * this web page has been carefully compiled to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * information provided is guaranteed. Remark: the colored syntax
 * highlighting and the measurement are still experimental."
 */