enum {
	/* Ensure the pool is page-aligned. */
	RTAS_WORK_AREA_ARENA_ALIGN = PAGE_SIZE,
	/* Don't let a single allocation claim the whole arena. */
	RTAS_WORK_AREA_ARENA_SZ = RTAS_WORK_AREA_MAX_ALLOC_SZ * 2,
	/*
	 * The smallest known work area size is for ibm,get-vpd's
	 * location code argument, which is limited to 79 characters
	 * plus 1 nul terminator.
	 *
	 * PAPR+ 7.3.20 ibm,get-vpd RTAS Call
	 * PAPR+ 12.3.2.4 Converged Location Code Rules - Length Restrictions
	 */
	RTAS_WORK_AREA_MIN_ALLOC_SZ = roundup_pow_of_two(80),
};
/* * A single work area buffer and descriptor to serve requests early in * boot before the allocator is fully initialized. We know 4KB is the * most any boot time user needs (they all call ibm,get-system-parameter).
*/ staticbool early_work_area_in_use __initdata; staticchar early_work_area_buf[SZ_4K] __initdata __aligned(SZ_4K); staticstruct rtas_work_area early_work_area __initdata = {
.buf = early_work_area_buf,
.size = sizeof(early_work_area_buf),
};
/* * The rtas_work_area_alloc() wrapper enforces this at build * time. Requests that exceed the arena size will block * indefinitely.
*/
WARN_ON(size > RTAS_WORK_AREA_MAX_ALLOC_SZ);
if (!rwa_state.available) return rtas_work_area_alloc_early(size); /* * To ensure FCFS behavior and prevent a high rate of smaller * requests from starving larger ones, use the mutex to queue * allocations.
*/
mutex_lock(&rwa_state.mutex);
wait_event(rwa_state.wqh,
(addr = gen_pool_alloc(rwa_state.gen_pool, size)) != 0);
mutex_unlock(&rwa_state.mutex);
/* * Initialization of the work area allocator happens in two parts. To * reliably reserve an arena that satisfies RTAS addressing * requirements, we must perform a memblock allocation early, * immmediately after RTAS instantiation. Then we have to wait until * the slab allocator is up before setting up the descriptor mempool * and adding the arena to a gen_pool.
*/ static __init int rtas_work_area_allocator_init(void)
{ constunsignedint order = ilog2(RTAS_WORK_AREA_MIN_ALLOC_SZ); const phys_addr_t pa_start = __pa(rwa_state.arena); const phys_addr_t pa_end = pa_start + RTAS_WORK_AREA_ARENA_SZ - 1; struct gen_pool *pool; constint nid = NUMA_NO_NODE; int err;
err = -ENOMEM; if (!rwa_state.arena) goto err_out;
pool = gen_pool_create(order, nid); if (!pool) goto err_out; /* * All RTAS functions that consume work areas are OK with * natural alignment, when they have alignment requirements at * all.
*/
gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
err = gen_pool_add(pool, (unsignedlong)rwa_state.arena,
RTAS_WORK_AREA_ARENA_SZ, nid); if (err) goto err_destroy;
err = mempool_init_kmalloc_pool(&rwa_state.descriptor_pool, 1, sizeof(struct rtas_work_area)); if (err) goto err_destroy;
/**
 * rtas_work_area_reserve_arena() - Reserve memory suitable for RTAS work areas.
 * @limit: Upper limit for memblock allocation.
 */
void __init rtas_work_area_reserve_arena(const phys_addr_t limit)
{
	const phys_addr_t align = RTAS_WORK_AREA_ARENA_ALIGN;
	const phys_addr_t size = RTAS_WORK_AREA_ARENA_SZ;
	const phys_addr_t min = MEMBLOCK_LOW_LIMIT;
	const int nid = NUMA_NO_NODE;

	/*
	 * Too early for a machine_is(pseries) check. But PAPR
	 * effectively mandates that ibm,get-system-parameter is
	 * present:
	 *
	 * R1–7.3.16–1. All platforms must support the System
	 * Parameters option.
	 *
	 * So set up the arena if we find that, with a fallback to
	 * ibm,configure-connector, just in case.
	 */
	if (rtas_function_implemented(RTAS_FN_IBM_GET_SYSTEM_PARAMETER) ||
	    rtas_function_implemented(RTAS_FN_IBM_CONFIGURE_CONNECTOR))
		rwa_state.arena = memblock_alloc_try_nid(size, align, min, limit, nid);
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.