/* Generic MTRR (Memory Type Range Register) driver.
Copyright (C) 1997-2000 Richard Gooch Copyright (c) 2002 Patrick Mochel
This library is free software; you can redistribute it and/or modify it under the terms of the GNU Library General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more details.
You should have received a copy of the GNU Library General Public License along with this library; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
Richard Gooch may be reached by email at rgooch@atnf.csiro.au The postal address is: Richard Gooch, c/o ATNF, P. O. Box 76, Epping, N.S.W., 2121, Australia.
Source: "Pentium Pro Family Developer's Manual, Volume 3: Operating System Writer's Guide" (Intel document number 242692), section 11.11.7
This was cleaned and made readable by Patrick Mochel <mochel@osdl.org> on 6-7 March 2002. Source: Intel Architecture Software Developers Manual, Volume 3: System Programming Guide; Section 9.11. (1997 edition - PPro).
*/
#include <linux/types.h> /* FIXME: kvm_para.h needs this */
/** * mtrr_rendezvous_handler - Work done in the synchronization handler. Executed * by all the CPUs. * @info: pointer to mtrr configuration data * * Returns nothing.
*/ staticint mtrr_rendezvous_handler(void *info)
{ struct set_mtrr_data *data = info;
/** * set_mtrr - update mtrrs on all processors * @reg: mtrr in question * @base: mtrr base * @size: mtrr size * @type: mtrr type * * This is kinda tricky, but fortunately, Intel spelled it out for us cleanly: * * 1. Queue work to do the following on all processors: * 2. Disable Interrupts * 3. Wait for all procs to do so * 4. Enter no-fill cache mode * 5. Flush caches * 6. Clear PGE bit * 7. Flush all TLBs * 8. Disable all range registers * 9. Update the MTRRs * 10. Enable all range registers * 11. Flush all TLBs and caches again * 12. Enter normal cache mode and reenable caching * 13. Set PGE * 14. Wait for buddies to catch up * 15. Enable interrupts. * * What does that mean for us? Well, stop_machine() will ensure that * the rendezvous handler is started on each CPU. And in lockstep they * do the state transition of disabling interrupts, updating MTRR's * (the CPU vendors may each do it differently, so we call mtrr_if->set() * callback and let them take care of it.) and enabling interrupts. * * Note that the mechanism is the same for UP systems, too; all the SMP stuff * becomes nops.
*/ staticvoid set_mtrr(unsignedint reg, unsignedlong base, unsignedlong size,
mtrr_type type)
{ struct set_mtrr_data data = { .smp_reg = reg,
.smp_base = base,
.smp_size = size,
.smp_type = type
};
/** * mtrr_add_page - Add a memory type region * @base: Physical base address of region in pages (in units of 4 kB!) * @size: Physical size of region in pages (4 kB) * @type: Type of MTRR desired * @increment: If this is true do usage counting on the region * * Memory type region registers control the caching on newer Intel and * non Intel processors. This function allows drivers to request an * MTRR is added. The details and hardware specifics of each processor's * implementation are hidden from the caller, but nevertheless the * caller should expect to need to provide a power of two size on an * equivalent power of two boundary. * * If the region cannot be added either because all regions are in use * or the CPU cannot support it a negative value is returned. On success * the register number for this entry is returned, but should be treated * as a cookie only. * * On a multiprocessor machine the changes are made to all processors. * This is required on x86 by the Intel processors. * * The available types are * * %MTRR_TYPE_UNCACHABLE - No caching * * %MTRR_TYPE_WRBACK - Write data back in bursts whenever * * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts * * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes * * BUGS: Needs a quiet flag for the cases where drivers do not mind * failures and do not wish system log messages to be sent.
*/ int mtrr_add_page(unsignedlong base, unsignedlong size, unsignedint type, bool increment)
{ unsignedlong lbase, lsize; int i, replace, error;
mtrr_type ltype;
if (!mtrr_enabled()) return -ENXIO;
error = mtrr_if->validate_add_page(base, size, type); if (error) return error;
/* If the type is WC, check that this processor supports it */ if ((type == MTRR_TYPE_WRCOMB) && !have_wrcomb()) {
pr_warn("your processor doesn't support write-combining\n"); return -ENOSYS;
}
if (!size) {
pr_warn("zero sized request\n"); return -EINVAL;
}
if ((base | (base + size - 1)) >>
(boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
pr_warn("base or size exceeds the MTRR width\n"); return -EINVAL;
}
error = -EINVAL;
replace = -1;
/* No CPU hotplug when we change MTRR entries */
cpus_read_lock();
/* Search for existing MTRR */
mutex_lock(&mtrr_mutex); for (i = 0; i < num_var_ranges; ++i) {
mtrr_if->get(i, &lbase, &lsize, <ype); if (!lsize || base > lbase + lsize - 1 ||
base + size - 1 < lbase) continue; /* * At this point we know there is some kind of * overlap/enclosure
*/ if (base < lbase || base + size - 1 > lbase + lsize - 1) { if (base <= lbase &&
base + size - 1 >= lbase + lsize - 1) { /* New region encloses an existing region */ if (type == ltype) {
replace = replace == -1 ? i : -2; continue;
} elseif (types_compatible(type, ltype)) continue;
}
pr_warn("0x%lx000,0x%lx000 overlaps existing 0x%lx000,0x%lx000\n", base, size, lbase,
lsize); goto out;
} /* New region is enclosed by an existing region */ if (ltype != type) { if (types_compatible(type, ltype)) continue;
pr_warn("type mismatch for %lx000,%lx000 old: %s new: %s\n",
base, size, mtrr_attrib_to_str(ltype),
mtrr_attrib_to_str(type)); goto out;
} if (increment)
++mtrr_usage_table[i];
error = i; goto out;
} /* Search for an empty MTRR */
i = mtrr_if->get_free_region(base, size, replace); if (i >= 0) {
set_mtrr(i, base, size, type); if (likely(replace < 0)) {
mtrr_usage_table[i] = 1;
} else {
mtrr_usage_table[i] = mtrr_usage_table[replace]; if (increment)
mtrr_usage_table[i]++; if (unlikely(replace != i)) {
set_mtrr(replace, 0, 0, 0);
mtrr_usage_table[replace] = 0;
}
}
} else {
pr_info("no more MTRRs available\n");
}
error = i;
out:
mutex_unlock(&mtrr_mutex);
cpus_read_unlock(); return error;
}
staticint mtrr_check(unsignedlong base, unsignedlong size)
{ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
pr_warn("size and base must be multiples of 4 kiB\n");
Dprintk("size: 0x%lx base: 0x%lx\n", size, base);
dump_stack(); return -1;
} return 0;
}
/**
 * mtrr_add - Add a memory type region
 * @base: Physical base address of region
 * @size: Physical size of region
 * @type: Type of MTRR desired
 * @increment: If this is true do usage counting on the region
 *
 * Memory type region registers control the caching on newer Intel and
 * non Intel processors. This function allows drivers to request an
 * MTRR is added. The details and hardware specifics of each processor's
 * implementation are hidden from the caller, but nevertheless the
 * caller should expect to need to provide a power of two size on an
 * equivalent power of two boundary.
 *
 * If the region cannot be added either because all regions are in use
 * or the CPU cannot support it a negative value is returned. On success
 * the register number for this entry is returned, but should be treated
 * as a cookie only.
 *
 * On a multiprocessor machine the changes are made to all processors.
 * This is required on x86 by the Intel processors.
 *
 * The available types are
 *
 * %MTRR_TYPE_UNCACHABLE - No caching
 * %MTRR_TYPE_WRBACK - Write data back in bursts whenever
 * %MTRR_TYPE_WRCOMB - Write data back soon but allow bursts
 * %MTRR_TYPE_WRTHROUGH - Cache reads but not writes
 *
 * BUGS: Needs a quiet flag for the cases where drivers do not mind
 * failures and do not wish system log messages to be sent.
 */
int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
	     bool increment)
{
	if (!mtrr_enabled())
		return -ENODEV;
	if (mtrr_check(base, size))
		return -EINVAL;
	/* mtrr_add_page() works in units of pages, not bytes. */
	return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
			     increment);
}
/** * mtrr_del_page - delete a memory type region * @reg: Register returned by mtrr_add * @base: Physical base address * @size: Size of region * * If register is supplied then base and size are ignored. This is * how drivers should call it. * * Releases an MTRR region. If the usage count drops to zero the * register is freed and the region returns to default state. * On success the register is returned, on failure a negative error * code.
*/ int mtrr_del_page(int reg, unsignedlong base, unsignedlong size)
{ int i, max;
mtrr_type ltype; unsignedlong lbase, lsize; int error = -EINVAL;
if (!mtrr_enabled()) return -ENODEV;
max = num_var_ranges; /* No CPU hotplug when we change MTRR entries */
cpus_read_lock();
mutex_lock(&mtrr_mutex); if (reg < 0) { /* Search for existing MTRR */ for (i = 0; i < max; ++i) {
mtrr_if->get(i, &lbase, &lsize, <ype); if (lbase == base && lsize == size) {
reg = i; break;
}
} if (reg < 0) {
Dprintk("no MTRR for %lx000,%lx000 found\n", base, size); goto out;
}
} if (reg >= max) {
pr_warn("register: %d too big\n", reg); goto out;
}
mtrr_if->get(reg, &lbase, &lsize, <ype); if (lsize < 1) {
pr_warn("MTRR %d not used\n", reg); goto out;
} if (mtrr_usage_table[reg] < 1) {
pr_warn("reg: %d has count=0\n", reg); goto out;
} if (--mtrr_usage_table[reg] < 1)
set_mtrr(reg, 0, 0, 0);
error = reg;
out:
mutex_unlock(&mtrr_mutex);
cpus_read_unlock(); return error;
}
/** * mtrr_del - delete a memory type region * @reg: Register returned by mtrr_add * @base: Physical base address * @size: Size of region * * If register is supplied then base and size are ignored. This is * how drivers should call it. * * Releases an MTRR region. If the usage count drops to zero the * register is freed and the region returns to default state. * On success the register is returned, on failure a negative error * code.
*/ int mtrr_del(int reg, unsignedlong base, unsignedlong size)
{ if (!mtrr_enabled()) return -ENODEV; if (mtrr_check(base, size)) return -EINVAL; return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
}
/** * arch_phys_wc_add - add a WC MTRR and handle errors if PAT is unavailable * @base: Physical base address * @size: Size of region * * If PAT is available, this does nothing. If PAT is unavailable, it * attempts to add a WC MTRR covering size bytes starting at base and * logs an error if this fails. * * The called should provide a power of two size on an equivalent * power of two boundary. * * Drivers must store the return value to pass to mtrr_del_wc_if_needed, * but drivers should not try to interpret that return value.
*/ int arch_phys_wc_add(unsignedlong base, unsignedlong size)
{ int ret;
if (pat_enabled() || !mtrr_enabled()) return 0; /* Success! (We don't need to do anything.) */
ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true); if (ret < 0) {
pr_warn("Failed to add WC MTRR for [%p-%p]; performance may suffer.",
(void *)base, (void *)(base + size - 1)); return ret;
} return ret + MTRR_TO_PHYS_WC_OFFSET;
}
EXPORT_SYMBOL(arch_phys_wc_add);
/*
 * arch_phys_wc_del - undoes arch_phys_wc_add
 * @handle: Return value from arch_phys_wc_add
 *
 * This cleans up after arch_phys_wc_add.
 *
 * The API guarantees that arch_phys_wc_del(error code) and
 * arch_phys_wc_del(0) do nothing.
 */
void arch_phys_wc_del(int handle)
{
	/* Non-positive handles mean arch_phys_wc_add() did nothing. */
	if (handle < 1)
		return;

	/* A valid handle always carries the cookie offset. */
	WARN_ON(handle < MTRR_TO_PHYS_WC_OFFSET);
	mtrr_del(handle - MTRR_TO_PHYS_WC_OFFSET, 0, 0);
}
EXPORT_SYMBOL(arch_phys_wc_del);
/*
 * arch_phys_wc_index - translates arch_phys_wc_add's return value
 * @handle: Return value from arch_phys_wc_add
 *
 * This will turn the return value from arch_phys_wc_add into an mtrr
 * index suitable for debugging.
 *
 * Note: There is no legitimate use for this function, except possibly
 * in printk line. Alas there is an illegitimate use in some ancient
 * drm ioctls.
 */
int arch_phys_wc_index(int handle)
{
	/* Handles below the cookie offset never came from a WC MTRR add. */
	return (handle < MTRR_TO_PHYS_WC_OFFSET)
		? -1
		: handle - MTRR_TO_PHYS_WC_OFFSET;
}
EXPORT_SYMBOL_GPL(arch_phys_wc_index);
/* Non-zero when boot-time MTRR cleanup modified the layout; checked in
 * mtrr_init_finalize() before warning about the MTRR state. */
int __initdata changed_by_mtrr_cleanup;
/** * mtrr_bp_init - initialize MTRRs on the boot CPU * * This needs to be called early; before any of the other CPUs are * initialized (i.e. before smp_init()).
*/ void __init mtrr_bp_init(void)
{ bool generic_mtrrs = cpu_feature_enabled(X86_FEATURE_MTRR); constchar *why = "(not available)"; unsignedlong config, dummy;
if (!generic_mtrrs && mtrr_state.enabled) { /* * Software overwrite of MTRR state, only for generic case. * Note that X86_FEATURE_MTRR has been reset in this case.
*/
init_table();
mtrr_build_map();
pr_info("MTRRs set to read-only\n");
return;
}
if (generic_mtrrs)
mtrr_if = &generic_mtrr_ops; else
mtrr_set_if();
if (mtrr_enabled()) { /* Get the number of variable MTRR ranges. */ if (mtrr_if == &generic_mtrr_ops)
rdmsr(MSR_MTRRcap, config, dummy); else
config = mtrr_if->var_regs;
num_var_ranges = config & MTRR_CAP_VCNT;
/*
 * Late-init finalization for the MTRR driver.
 *
 * Copies the MTRR map, and when MTRRs are enabled either warns about
 * an inconsistent MTRR state (generic, kernel-controlled case) or
 * registers the syscore ops so MTRRs are restored across suspend.
 * Always returns 0 (initcall convention).
 */
static int __init mtrr_init_finalize(void)
{
	/*
	 * Map might exist if guest_force_mtrr_state() has been called or if
	 * mtrr_enabled() returns true.
	 */
	mtrr_copy_map();

	if (!mtrr_enabled())
		return 0;

	if (memory_caching_control & CACHE_MTRR) {
		/* Kernel controls the caches: just sanity-check the state. */
		if (!changed_by_mtrr_cleanup)
			mtrr_state_warn();
		return 0;
	}

	mtrr_register_syscore();

	return 0;
}
subsys_initcall(mtrr_init_finalize);
/*
 * NOTE(review): the text below is web-scrape residue (a German page footer
 * reporting processing time and a content disclaimer), not part of the
 * original kernel source. Preserved here inside a comment so the file
 * remains syntactically coherent:
 *
 *   Messung V0.5
 *   Dauer der Verarbeitung: 0.29 Sekunden (vorverarbeitet)
 *   Die Informationen auf dieser Webseite wurden nach bestem Wissen
 *   sorgfaeltig zusammengestellt. Es wird jedoch weder Vollstaendigkeit,
 *   noch Richtigkeit, noch Qualitaet der bereitgestellten Informationen
 *   zugesichert.
 *   Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 *   experimentell.
 */