/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	union {
		/* Reference count while the mapping is live. */
		unsigned long refcount;
		/* Deferred-removal work once the last reference is dropped. */
		struct rcu_work rwork;
	} track;
};
#ifdef CONFIG_KEXEC /* * We may have been provided with an RSDP on the command line, * but if a malicious user has done so they may be pointing us * at modified ACPI tables that could alter kernel behaviour - * so, we check the lockdown status before making use of * it. If we trust it then also stash it in an architecture * specific location (if appropriate) so it can be carried * over further kexec()s.
*/ if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
acpi_arch_set_root_pointer(acpi_rsdp); return acpi_rsdp;
} #endif
pa = acpi_arch_get_root_pointer(); if (pa) return pa;
if (efi_enabled(EFI_CONFIG_TABLES)) { if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) return efi.acpi20; if (efi.acpi != EFI_INVALID_TABLE_ADDR) return efi.acpi;
pr_err("System description tables not found\n");
} elseif (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
acpi_find_root_pointer(&pa);
}
return pa;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ staticstruct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{ struct acpi_ioremap *map;
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ staticvoid __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsignedint size)
{ struct acpi_ioremap *map;
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */ staticstruct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{ struct acpi_ioremap *map;
/*
 * Decide whether a pfn should be mapped via kmap() rather than ioremap().
 * On arm64 and RISC-V the ioremap() path already applies the correct
 * cache attributes, so kmap is never needed there.
 */
#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif
/** * acpi_os_map_iomem - Get a virtual address for a given physical address range. * @phys: Start of the physical address range to map. * @size: Size of the physical address range to map. * * Look up the given physical address range in the list of existing ACPI memory * mappings. If found, get a reference to it and return a pointer to it (its * virtual address). If not found, map it, add it to that list and return a * pointer to it. * * During early init (when acpi_permanent_mmap has not been set yet) this * routine simply calls __acpi_map_table() to get the job done.
*/ void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{ struct acpi_ioremap *map; void __iomem *virt;
acpi_physical_address pg_off;
acpi_size pg_sz;
if (phys > ULONG_MAX) {
pr_err("Cannot map memory that high: 0x%llx\n", phys); return NULL;
}
if (!acpi_permanent_mmap) return __acpi_map_table((unsignedlong)phys, size);
mutex_lock(&acpi_ioremap_lock); /* Check if there's a suitable mapping already. */
map = acpi_map_lookup(phys, size); if (map) {
map->track.refcount++; goto out;
}
/* Must be called with mutex_lock(&acpi_ioremap_lock) */ staticvoid acpi_os_drop_map_ref(struct acpi_ioremap *map)
{ if (--map->track.refcount) return;
/** * acpi_os_unmap_iomem - Drop a memory mapping reference. * @virt: Start of the address range to drop a reference to. * @size: Size of the address range to drop a reference to. * * Look up the given virtual address range in the list of existing ACPI memory * mappings, drop a reference to it and if there are no more active references * to it, queue it up for later removal. * * During early init (when acpi_permanent_mmap has not been set yet) this * routine simply calls __acpi_unmap_table() to get the job done. Since * __acpi_unmap_table() is an __init function, the __ref annotation is needed * here.
*/ void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{ struct acpi_ioremap *map;
if (!acpi_permanent_mmap) {
__acpi_unmap_table(virt, size); return;
}
mutex_lock(&acpi_ioremap_lock);
map = acpi_map_lookup_virt(virt, size); if (!map) {
mutex_unlock(&acpi_ioremap_lock);
WARN(true, "ACPI: %s: bad address %p\n", __func__, virt); return;
}
acpi_os_drop_map_ref(map);
/**
 * acpi_os_unmap_memory - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Thin wrapper around acpi_os_unmap_iomem() for callers that hold a
 * plain (non-__iomem) pointer.
 */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	void __iomem *addr = (void __iomem *)virt;

	acpi_os_unmap_iomem(addr, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
/* * ACPI interrupts different from the SCI in our copy of the FADT are * not supported.
*/ if (gsi != acpi_gbl_FADT.sci_interrupt) return AE_BAD_PARAMETER;
if (acpi_irq_handler) return AE_ALREADY_ACQUIRED;
if (acpi_gsi_to_irq(gsi, &irq) < 0) {
pr_err("SCI (ACPI GSI %d) not registered\n", gsi); return AE_OK;
}
/* * Use a hrtimer because the timer wheel timers are optimized for * cancelation before they expire and this timer is not going to be * canceled. * * Set the delta between the requested sleep time and the effective * deadline to at least 50 us in case there is an opportunity for timer * coalescing. * * Moreover, longer sleeps can be assumed to need somewhat less timer * precision, so sacrifice some of it for making the timer a more likely * candidate for coalescing by setting the delta to 1% of the sleep time * if it is above 5 ms (this value is chosen so that the delta is a * continuous function of the sleep time).
*/ if (ms > 5)
delta_us = (USEC_PER_MSEC / 100) * ms;
if (delay > us)
delay = us;
udelay(delay);
touch_nmi_watchdog();
us -= delay;
}
}
/*
 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity. Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended. Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented. See also timekeeping_suspend().
 */
u64 acpi_os_get_timer(void)
{
	/* Scale jiffies elapsed since boot into 100ns units. */
	return (get_jiffies_64() - INITIAL_JIFFIES) *
		(ACPI_100NSEC_PER_SEC / HZ);
}
if (!IS_ENABLED(CONFIG_HAS_IOPORT)) { /* * set all-1 result as if reading from non-existing * I/O port
*/
*value = GENMASK(width, 0); return AE_NOT_IMPLEMENTED;
}
/*
 * acpi_os_read_iomem - Read a value of the given width from an MMIO address.
 * @virt_addr: Virtual (ioremapped) address to read from.
 * @value: Where to store the result; only the low @width bits are written.
 * @width: Access width in bits: 8, 16, 32 or 64.
 *
 * Returns 0 on success or -EINVAL if @width is not one of the supported
 * widths. The visible text of this function ended at the switch's closing
 * brace with no return, so a non-void function fell off the end (undefined
 * behavior); the terminating "return 0;" is restored here.
 */
int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{
	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{ int ret; int (*func)(acpi_osd_exec_callback, void *); struct module *owner;
if (!acpi_debugger_initialized) return -ENODEV;
mutex_lock(&acpi_debugger.lock); if (!acpi_debugger.ops) {
ret = -ENODEV; goto err_lock;
} if (!try_module_get(acpi_debugger.owner)) {
ret = -ENODEV; goto err_lock;
}
func = acpi_debugger.ops->create_thread;
owner = acpi_debugger.owner;
mutex_unlock(&acpi_debugger.lock);
/******************************************************************************* * * FUNCTION: acpi_os_execute * * PARAMETERS: Type - Type of the callback * Function - Function to be executed * Context - Function parameters * * RETURN: Status * * DESCRIPTION: Depending on type, either queues function for deferred execution or * immediately executes function on a separate thread. *
******************************************************************************/
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Scheduling function [%p(%p)] for deferred execution.\n",
function, context));
if (type == OSL_DEBUGGER_MAIN_THREAD) {
ret = acpi_debugger_create_thread(function, context); if (ret) {
pr_err("Kernel thread creation failed\n"); return AE_ERROR;
} return AE_OK;
}
/* * Allocate/initialize DPC structure. Note that this memory will be * freed by the callee. The kernel handles the work_struct list in a * way that allows us to also free its memory inside the callee. * Because we may want to schedule several tasks with different * parameters we can't use the approach some kernel code uses of * having a static work_struct.
*/
dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC); if (!dpc) return AE_NO_MEMORY;
/* * To prevent lockdep from complaining unnecessarily, make sure that * there is a different static lockdep key for each workqueue by using * INIT_WORK() for each of them separately.
*/ switch (type) { case OSL_NOTIFY_HANDLER:
ret = queue_work(kacpi_notify_wq, &dpc->work); break; case OSL_GPE_HANDLER: /* * On some machines, a software-initiated SMI causes corruption * unless the SMI runs on CPU 0. An SMI can be initiated by * any AML, but typically it's done in GPE-related methods that * are run via workqueues, so we can avoid the known corruption * cases by always queueing on CPU 0.
*/
ret = queue_work_on(0, kacpid_wq, &dpc->work); break; default:
pr_err("Unsupported os_execute type %d.\n", type); goto err;
} if (!ret) {
pr_err("Unable to queue work\n"); goto err;
}
/*
 * acpi_os_wait_events_complete - Wait for pending ACPI event work to finish.
 *
 * Guarantees that neither the GPE handler nor the fixed event handler is
 * still running on another CPU after a handler has been removed.
 */
void acpi_os_wait_events_complete(void)
{
	/* First wait for any in-flight SCI hard interrupt handler to return. */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);

	/* Then drain the workqueues that the handlers queue work onto. */
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);
acpi_handle_debug(adev->handle, "Scheduling hotplug event %u for deferred handling\n",
src);
hpw = kmalloc(sizeof(*hpw), GFP_KERNEL); if (!hpw) return AE_NO_MEMORY;
INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
hpw->adev = adev;
hpw->src = src; /* * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because * the hotplug code may call driver .remove() functions, which may * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush * these workqueues.
*/ if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
kfree(hpw); return AE_ERROR;
} return AE_OK;
}
/* * TODO: A better way to delete semaphores? Linux doesn't have a * 'delete_semaphore()' function -- may result in an invalid * pointer dereference for non-synchronized consumers. Should * we at least check for blocked threads and signal/cancel them?
*/
BUG_ON(!list_empty(&sem->wait_list));
kfree(sem);
sem = NULL;
return AE_OK;
}
/* * TODO: Support for units > 1?
*/
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
acpi_status status = AE_OK; struct semaphore *sem = (struct semaphore *)handle; long jiffies; int ret = 0;
if (!acpi_os_initialized) return AE_OK;
if (!sem || (units < 1)) return AE_BAD_PARAMETER;
if (units > 1) return AE_SUPPORT;
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
handle, units, timeout));
/* remove the CR kdb includes */
chars = strlen(buffer) - 1;
buffer[chars] = '\0';
} #else int ret;
ret = acpi_debugger_read_cmd(buffer, buffer_length); if (ret < 0) return AE_ERROR; if (bytes_read)
*bytes_read = ret; #endif
return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);
/*
 * acpi_os_wait_command_ready - Block until the debugger back end is ready
 * to accept a command.
 *
 * Returns AE_OK on success, AE_ERROR if the back end reported a failure.
 */
acpi_status acpi_os_wait_command_ready(void)
{
	return acpi_debugger_wait_command_ready() < 0 ? AE_ERROR : AE_OK;
}
/*
 * acpi_os_notify_command_complete - Tell the debugger back end that the
 * current command has finished executing.
 *
 * Returns AE_OK on success, AE_ERROR if the back end reported a failure.
 */
acpi_status acpi_os_notify_command_complete(void)
{
	return acpi_debugger_notify_command_complete() < 0 ? AE_ERROR : AE_OK;
}
/*
 * acpi_os_signal - Handle a signal raised by the ACPICA core.
 * @function: Signal type (ACPI_SIGNAL_FATAL or ACPI_SIGNAL_BREAKPOINT).
 * @info: Signal-specific data (currently unused).
 *
 * The visible text ended at the switch's closing brace, leaving the
 * function without a return value; the terminating "return AE_OK;" is
 * restored here. Always reports success: fatal opcodes are only logged
 * and breakpoints are treated as NOPs per the ACPI specification.
 */
acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		pr_err("Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}
/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default. It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("Auto-serialization disabled\n");

	/*
	 * The visible text had a fused "staticint" token and no return;
	 * __setup() handlers return 1 to mark the parameter as consumed.
	 */
	return 1;
}
/*
 * Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0
/* Check for resource conflicts between ACPI OperationRegions and native
* drivers */ int acpi_check_resource_conflict(conststruct resource *res)
{
acpi_adr_space_type space_id;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) return 0;
/******************************************************************************* * * FUNCTION: acpi_os_create_cache * * PARAMETERS: name - Ascii name for the cache * size - Size of each cached object * depth - Maximum depth of the cache (in objects) <ignored> * cache - Where the new cache object is returned * * RETURN: status * * DESCRIPTION: Create a cache object *
******************************************************************************/
/******************************************************************************* * * FUNCTION: acpi_os_release_object * * PARAMETERS: Cache - Handle to cache object * Object - The object to be released * * RETURN: None * * DESCRIPTION: Release an object to the specified cache. If cache is full, * the object is deleted. *
******************************************************************************/
if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) { /* * Use acpi_os_map_generic_address to pre-map the reset * register if it's in system memory.
*/ void *rv;
/*
 * NOTE(review): The following German website disclaimer is extraneous
 * boilerplate accidentally appended to this source file; it is not part
 * of the code and should be removed. It is fenced inside a comment here
 * so the file remains well-formed, with an English translation:
 *
 * "The information on this website has been carefully compiled to the
 * best of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 *
 * Original: Die Informationen auf dieser Webseite wurden nach bestem
 * Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereitgestellten
 * Informationen zugesichert. Bemerkung: Die farbliche Syntaxdarstellung
 * und die Messung sind noch experimentell.
 */