/* * MTRR (Memory Type Range Register) cleanup * * Copyright (C) 2009 Yinghai Lu * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Library General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the Free * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/ #include <linux/init.h> #include <linux/pci.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/kvm_para.h> #include <linux/range.h>
#define BIOS_BUG_MSG \ "WARNING: BIOS bug: VAR MTRR %d contains strange UC entry under 1M, check with your system vendor!\n"
/*
 * x86_get_mtrr_mem_range() - build the list of write-back cached PFN
 * ranges from the variable MTRRs, then subtract UC/WRPROT ranges and the
 * caller-supplied extra_remove window.
 *
 * NOTE(review): this region is whitespace/line-mangled ("staticint",
 * "unsignedlong", merged statements) and appears truncated: from the
 * "Align with gran size" comment onward the text jumps into fragments of
 * a different function (the range_to_mtrr layout state machine).  Code
 * bytes are left untouched here; re-sync against the pristine upstream
 * file before compiling.
 */
staticint __init
x86_get_mtrr_mem_range(struct range *range, int nr_range, unsignedlong extra_remove_base, unsignedlong extra_remove_size)
{ unsignedlong base, size;
mtrr_type type; int i;
/* Pass 1: merge every write-back variable MTRR into the range list. */
for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type; if (type != MTRR_TYPE_WRBACK) continue;
base = range_state[i].base_pfn;
size = range_state[i].size_pfn;
nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,
base, base + size);
}
Dprintk("After WB checking\n"); for (i = 0; i < nr_range; i++)
Dprintk("MTRR MAP PFN: %016llx - %016llx\n",
range[i].start, range[i].end);
/* Take out UC ranges: */ for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type; if (type != MTRR_TYPE_UNCACHABLE &&
type != MTRR_TYPE_WRPROT) continue;
size = range_state[i].size_pfn; if (!size) continue;
base = range_state[i].base_pfn; if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed &&
(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
(mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) { /* Var MTRR contains UC entry below 1M? Skip it: */
pr_warn(BIOS_BUG_MSG, i); if (base + size <= (1<<(20-PAGE_SHIFT))) continue;
/* Clip the UC entry to start at 1M; fixed MTRRs own [0, 1M). */
size -= (1<<(20-PAGE_SHIFT)) - base;
base = 1<<(20-PAGE_SHIFT);
}
subtract_range(range, RANGE_NUM, base, base + size);
} if (extra_remove_size)
subtract_range(range, RANGE_NUM, extra_remove_base,
extra_remove_base + extra_remove_size);
Dprintk("After UC checking\n"); for (i = 0; i < RANGE_NUM; i++) { if (!range[i].end) continue;
/* Align with gran size, prevent small block used up MTRRs: */
/*
 * NOTE(review): everything below belongs to a different function's body
 * (it references state->range_startk, range0_sizek, chunk_sizek, etc.,
 * none of which exist in this scope) — it is not valid here as-is.
 */
range_basek = ALIGN(state->range_startk, gran_sizek); if ((range_basek > basek) && basek) return second_sizek;
/* Only cut back when it is not the last: */ if (sizek) { while (range0_basek + range0_sizek > (basek + sizek)) { if (range0_sizek >= chunk_sizek)
range0_sizek -= chunk_sizek; else
range0_sizek = 0;
/* See if I can merge with the last range: */ if ((basek <= 1024) ||
(state->range_startk + state->range_sizek == basek)) { unsignedlong endk = basek + sizek;
state->range_sizek = endk - state->range_startk; return;
} /* Write the range mtrrs: */ if (state->range_sizek != 0)
second_sizek = range_to_mtrr_with_hole(state, basek, sizek);
/*
 * x86_setup_var_mtrrs() - program the variable MTRRs to cover @range
 * using the given chunk/gran sizes; returns the number of registers used.
 *
 * NOTE(review): only the signature and first locals survive in this
 * extraction — the function body is missing (the text jumps straight
 * into mtrr_need_cleanup()).  Restore from the pristine upstream file.
 */
staticint __init
x86_setup_var_mtrrs(struct range *range, int nr_range,
u64 chunk_size, u64 gran_size)
{ struct var_mtrr_state var_state; int num_reg; int i;
/*
 * mtrr_need_cleanup - check whether the variable-MTRR layout is a
 * candidate for cleanup.
 *
 * Tallies the variable ranges by cache type; zero-sized entries are
 * counted in the extra MTRR_NUM_TYPES bucket so they don't skew the
 * per-type totals.  Cleanup is only worthwhile when at least one UC
 * entry exists and every non-empty entry is either WB or UC.
 *
 * Returns 1 if cleanup should be attempted, 0 otherwise.
 *
 * (Fixed from extraction damage: "staticint" token fusion and merged
 * statement lines made the original text uncompilable; logic unchanged.)
 */
static int __init mtrr_need_cleanup(void)
{
	int i;
	mtrr_type type;
	unsigned long size;
	/* Extra one for all 0: */
	int num[MTRR_NUM_TYPES + 1];

	/* Check entries number: */
	memset(num, 0, sizeof(num));
	for (i = 0; i < num_var_ranges; i++) {
		type = range_state[i].type;
		size = range_state[i].size_pfn;
		if (type >= MTRR_NUM_TYPES)
			continue;
		if (!size)
			type = MTRR_NUM_TYPES;
		num[type]++;
	}

	/* Check if we got UC entries: */
	if (!num[MTRR_TYPE_UNCACHABLE])
		return 0;

	/* Check if we only had WB and UC */
	if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
	    num_var_ranges - num[MTRR_NUM_TYPES])
		return 0;

	return 1;
}
/* Sum of all WB-covered pages for the current layout (init-time only). */
staticunsignedlong __initdata range_sums;
/*
 * mtrr_calc_range_state() - evaluate one (chunk_size, gran_size)
 * candidate layout: convert ranges to variable-MTRR state, rebuild the
 * resulting memory map, and compare its coverage (range_sums_new)
 * against the original.
 *
 * NOTE(review): this region is whitespace/line-mangled and spliced: from
 * the "Get it and store it aside" loop onward the text is actually a
 * fragment of a different caller (it references range, nr_range, base,
 * size, type and returns values from a void function), and it ends in
 * the Tom2 #defines that belong to amd_special_default_mtrr().  Code
 * bytes are left untouched; re-sync against the pristine upstream file.
 */
staticvoid __init
mtrr_calc_range_state(u64 chunk_size, u64 gran_size, unsignedlong x_remove_base, unsignedlong x_remove_size, int i)
{ /* * range_new should really be an automatic variable, but * putting 4096 bytes on the stack is frowned upon, to put it * mildly. It is safe to make it a static __initdata variable, * since mtrr_calc_range_state is only called during init and * there's no way it will call itself recursively.
*/ staticstruct range range_new[RANGE_NUM] __initdata; unsignedlong range_sums_new; int nr_range_new; int num_reg;
/* Convert ranges to var ranges state: */
num_reg = x86_setup_var_mtrrs(range, nr_range, chunk_size, gran_size);
/* We got new setting in range_state, check it: */
memset(range_new, 0, sizeof(range_new));
nr_range_new = x86_get_mtrr_mem_range(range_new, 0,
x_remove_base, x_remove_size);
range_sums_new = sum_ranges(range_new, nr_range_new);
/* Get it and store it aside: */
memset(range_state, 0, sizeof(range_state)); for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &base, &size, &type);
range_state[i].base_pfn = base;
range_state[i].size_pfn = size;
range_state[i].type = type;
}
/* Check if we need handle it and can handle it: */ if (!mtrr_need_cleanup()) return 0;
/* Print original var MTRRs at first, for debugging: */
Dprintk("original variable MTRRs\n");
print_out_mtrr_range_state();
/* * [0, 1M) should always be covered by var mtrr with WB * and fixed mtrrs should take effect before var mtrr for it:
*/
nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
1ULL<<(20 - PAGE_SHIFT)); /* add from var mtrr at last */
nr_range = x86_get_mtrr_mem_range(range, nr_range,
x_remove_base, x_remove_size);
/* * Newer AMD K8s and later CPUs have a special magic MSR way to force WB * for memory >4GB. Check for that here. * Note this won't check if the MTRRs < 4GB where the magic bit doesn't * apply to are wrong, but so far we don't know of any such case in the wild.
*/ #define Tom2Enabled (1U << 21) #define Tom2ForceMemTypeWB (1U << 22)
/*
 * amd_special_default_mtrr - detect the AMD/Hygon setup where all memory
 * between 4GB and the top of memory is forced write-back via SYSCFG
 * magic bits rather than by variable MTRRs.
 *
 * Returns 1 when both Tom2Enabled and Tom2ForceMemTypeWB are set in
 * SYSCFG on a family 0xf+ AMD/Hygon CPU, 0 otherwise.
 */
int __init amd_special_default_mtrr(void)
{
	u32 lo, hi;

	/* Only AMD and Hygon implement these SYSCFG bits. */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return 0;

	/* The magic bits exist from K8 (family 0xf) onwards. */
	if (boot_cpu_data.x86 < 0xf)
		return 0;

	/* In case some hypervisor doesn't pass SYSCFG through: */
	if (rdmsr_safe(MSR_AMD64_SYSCFG, &lo, &hi) < 0)
		return 0;

	/*
	 * Memory between 4GB and top of mem is forced WB by this magic bit.
	 * Reserved before K8RevF, but should be zero there.
	 */
	return (lo & (Tom2Enabled | Tom2ForceMemTypeWB)) ==
	       (Tom2Enabled | Tom2ForceMemTypeWB);
}
/** * mtrr_trim_uncached_memory - trim RAM not covered by MTRRs * @end_pfn: ending page frame number * * Some buggy BIOSes don't setup the MTRRs properly for systems with certain * memory configurations. This routine checks that the highest MTRR matches * the end of memory, to make sure the MTRRs having a write back type cover * all of the memory the kernel is intending to use. If not, it'll trim any * memory off the end by adjusting end_pfn, removing it from the kernel's * allocation pools, warning the user with an obnoxious message.
*/ int __init mtrr_trim_uncached_memory(unsignedlong end_pfn)
{ unsignedlong i, base, size, highest_pfn = 0, def, dummy;
mtrr_type type;
u64 total_trim_size; /* extra one for all 0 */ int num[MTRR_NUM_TYPES + 1];
/* Nothing to do when MTRRs are disabled altogether. */
if (!mtrr_enabled()) return 0;
/* * Make sure we only trim uncachable memory on machines that * support the Intel MTRR architecture:
*/ if (!cpu_feature_enabled(X86_FEATURE_MTRR) || disable_mtrr_trim) return 0;
/* Get it and store it aside: */
memset(range_state, 0, sizeof(range_state)); for (i = 0; i < num_var_ranges; i++) {
mtrr_if->get(i, &base, &size, &type);
range_state[i].base_pfn = base;
range_state[i].size_pfn = size;
range_state[i].type = type;
}
/* Find highest cached pfn: */ for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type; if (type != MTRR_TYPE_WRBACK) continue;
base = range_state[i].base_pfn;
size = range_state[i].size_pfn; if (highest_pfn < base + size)
highest_pfn = base + size;
}
/* kvm/qemu doesn't have mtrr set right, don't trim them all: */ if (!highest_pfn) {
pr_info("CPU MTRRs all blank - virtualized system.\n"); return 0;
}
/* Check entries number: */
memset(num, 0, sizeof(num)); for (i = 0; i < num_var_ranges; i++) {
type = range_state[i].type; if (type >= MTRR_NUM_TYPES) continue;
/* Zero-sized entries go in the extra MTRR_NUM_TYPES bucket. */
size = range_state[i].size_pfn; if (!size)
type = MTRR_NUM_TYPES;
num[type]++;
}
/* No entry for WB? */ if (!num[MTRR_TYPE_WRBACK]) return 0;
/* Check if we only had WB and UC: */ if (num[MTRR_TYPE_WRBACK] + num[MTRR_TYPE_UNCACHABLE] !=
num_var_ranges - num[MTRR_NUM_TYPES]) return 0;
/*
 * NOTE(review): the extraction appears to have dropped the middle of
 * this function here — upstream also checks the MTRR default type and
 * rebuilds the memory map before trimming.  The range/nr_range used
 * below are never declared in the visible text; presumably they are
 * file-scope __initdata populated by the missing code — TODO confirm
 * against the pristine upstream file.  Also note the "unsignedlong"
 * token fusions above, which will not compile as-is.
 */
/* Check the head: */
total_trim_size = 0; if (range[0].start)
total_trim_size += real_trim_memory(0, range[0].start);
/* Check the holes: */ for (i = 0; i < nr_range - 1; i++) { if (range[i].end < range[i+1].start)
total_trim_size += real_trim_memory(range[i].end,
range[i+1].start);
}
/* Check the top: */
i = nr_range - 1; if (range[i].end < end_pfn)
total_trim_size += real_trim_memory(range[i].end,
end_pfn);
/* Anything trimmed? Warn loudly and sync the e820 map. */
if (total_trim_size) {
pr_warn("WARNING: BIOS bug: CPU MTRRs don't cover all of memory, losing %lluMB of RAM.\n",
total_trim_size >> 20);
if (!changed_by_mtrr_cleanup)
WARN_ON(1);
pr_info("update e820 for mtrr\n");
e820__update_table_print();
return 1;
}
return 0;
}
/*
 * Extraction residue (German web-viewer footer), not part of the source
 * file — kept as a comment so the file remains syntactically coherent:
 *
 * Messung V0.5
 * Dauer der Verarbeitung: 0.14 Sekunden
 * (vorverarbeitet)
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */