/* This enables us to keep track of the memory removed from each node. */
struct memtrace_entry {
	void *mem;		/* ioremap() mapping of the removed range (NULL if unmapped) */
	u64 start;		/* physical start address of the removed range */
	u64 size;		/* size in bytes of the removed range */
	u32 nid;		/* NUMA node the memory came from; NUMA_NO_NODE once freed */
	struct dentry *dir;	/* debugfs directory for this entry — presumably set from
				 * debugfs_create_dir(); assignment not visible in this chunk */
	char name[16];		/* debugfs directory name, "%08x" of nid */
};
#define FLUSH_CHUNK_SIZE SZ_1G

/**
 * flush_dcache_range_chunked(): Write any modified data cache blocks out to
 * memory and invalidate them, in chunks of up to FLUSH_CHUNK_SIZE.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * @start: the start address
 * @stop: the stop address (exclusive)
 * @chunk: the max size of the chunks
 *
 * cond_resched() is called between chunks so that flushing a very large
 * range does not hog the CPU.
 */
static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
				       unsigned long chunk)
{
	unsigned long addr = start;

	while (addr < stop) {
		unsigned long end = min(stop, addr + chunk);

		flush_dcache_range(addr, end);
		cond_resched();
		addr += chunk;
	}
}
/*
 * NOTE(review): this is the interior of an allocation routine (presumably
 * memtrace_alloc_node()) whose signature and remainder are not visible in
 * this chunk — confirm against the full file before relying on its contract.
 */
/*
 * Trace memory needs to be aligned to the size, which is guaranteed
 * by alloc_contig_pages().
 */
page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
			  __GFP_NOWARN | __GFP_ZERO, nid, NULL);
/* Allocation failure is reported as 0: no trace memory on this node. */
if (!page)
	return 0;
start_pfn = page_to_pfn(page);

/*
 * Before we go ahead and use this range as cache inhibited range
 * flush the cache.
 */
flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
			   (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
			   FLUSH_CHUNK_SIZE);

/*
 * Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
 * dumping, ...) should be touching these pages.
 */
for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
	__SetPageOffline(pfn_to_page(pfn));
/*
 * memtrace_init_regions_runtime() - carve @size bytes of trace memory out of
 * each online NUMA node via memtrace_alloc_node(), recording each chunk in
 * the global memtrace_array.
 *
 * Returns -EINVAL if the bookkeeping array cannot be allocated.
 *
 * NOTE(review): this chunk ends mid-loop — the code recording a successful
 * allocation and the function's closing braces/return are not visible here;
 * do not assume their contents.
 */
static int memtrace_init_regions_runtime(u64 size)
{
	u32 nid;
	u64 m;

	memtrace_array = kcalloc(num_online_nodes(),
				 sizeof(struct memtrace_entry), GFP_KERNEL);
	if (!memtrace_array) {
		pr_err("Failed to allocate memtrace_array\n");
		return -EINVAL;
	}

	for_each_online_node(nid) {
		m = memtrace_alloc_node(nid, size);

		/*
		 * A node might not have any local memory, so warn but
		 * continue on.
		 */
		if (!m) {
			pr_err("Failed to allocate trace memory on node %d\n", nid);
			continue;
		}

		pr_info("Allocated trace memory on node %d at 0x%016llx\n", nid, m);
/*
 * memtrace_init_debugfs() - ioremap each recorded trace chunk and create a
 * per-node debugfs directory (named "%08x" of the node id) for it.
 *
 * Returns 0 on success, -1 if any chunk failed to map (remaining chunks are
 * still attempted).
 *
 * NOTE(review): this chunk is truncated — whatever is created inside `dir`
 * and the function's tail/return are not visible here.
 */
static int memtrace_init_debugfs(void)
{
	int ret = 0;
	int i;

	for (i = 0; i < memtrace_array_nr; i++) {
		struct dentry *dir;
		struct memtrace_entry *ent = &memtrace_array[i];

		ent->mem = ioremap(ent->start, ent->size);
		/* Warn but continue on */
		if (!ent->mem) {
			pr_err("Failed to map trace memory at 0x%llx\n",
			       ent->start);
			ret = -1;
			continue;
		}

		snprintf(ent->name, 16, "%08x", ent->nid);
		dir = debugfs_create_dir(ent->name, memtrace_debugfs_dir);
/*
 * Iterate through the chunks of memory we allocated and attempt to expose
 * them back to the kernel.
 *
 * Returns 0 when every chunk was freed (globals are then reset), otherwise
 * the number of chunks that could not be freed. Chunks freed on a previous
 * call are skipped via the nid == NUMA_NO_NODE sentinel, so the function is
 * safe to re-enter after a partial failure.
 */
static int memtrace_free_regions(void)
{
	int i, ret = 0;
	struct memtrace_entry *ent;

	/* Walk the array in reverse index order. */
	for (i = memtrace_array_nr - 1; i >= 0; i--) {
		ent = &memtrace_array[i];

		/* We have freed this chunk previously */
		if (ent->nid == NUMA_NO_NODE)
			continue;

		/* Remove from io mappings */
		if (ent->mem) {
			iounmap(ent->mem);
			ent->mem = NULL;	/* fixed: was "= 0"; use NULL for pointers */
		}

		if (memtrace_free(ent->nid, ent->start, ent->size)) {
			pr_err("Failed to free trace memory on node %d\n",
			       ent->nid);
			ret += 1;
			continue;
		}

		/*
		 * Memory was freed successfully so clean up references to it
		 * so on reentry we can tell that this chunk was freed.
		 */
		debugfs_remove_recursive(ent->dir);
		pr_info("Freed trace memory back on node %d\n", ent->nid);
		ent->size = ent->start = ent->nid = NUMA_NO_NODE;
	}

	if (ret)
		return ret;

	/* If all chunks of memory were freed successfully, reset globals */
	kfree(memtrace_array);
	memtrace_array = NULL;
	memtrace_size = 0;
	memtrace_array_nr = 0;
	return 0;
}
/*
 * NOTE(review): interior of a size-setting handler (signature not visible in
 * this chunk); `val`, `rc`, `bytes` and the `out_unlock` label are declared
 * outside this view — confirm against the full file.
 */
/*
 * Don't attempt to do anything if size isn't aligned to a memory
 * block or equal to zero.
 */
bytes = memory_block_size_bytes();
if (val & (bytes - 1)) {
	pr_err("Value must be aligned with 0x%llx\n", bytes);
	return -EINVAL;
}

/* Serialize against concurrent enable/disable requests. */
mutex_lock(&memtrace_mutex);

/* Free all previously allocated memory. */
if (memtrace_size && memtrace_free_regions())
	goto out_unlock;

/* val == 0 means "disable": everything is already freed at this point. */
if (!val) {
	rc = 0;
	goto out_unlock;
}

/* Allocate memory. */
if (memtrace_init_regions_runtime(val))
	goto out_unlock;
/*
 * NOTE(review): the German text below appears to be website-disclaimer
 * residue from the page this file was extracted from, not part of the
 * source. Preserved here in English translation:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge. However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */