nb = kcalloc(amd_northbridges.num, sizeof(struct amd_northbridge), GFP_KERNEL); if (!nb) return -ENOMEM;
amd_northbridges.nb = nb;
for (i = 0; i < amd_northbridges.num; i++) {
node_to_amd_nb(i)->misc = amd_node_get_func(i, 3);
/* * Each Northbridge must have a 'misc' device. * If not, then uninitialize everything.
*/ if (!node_to_amd_nb(i)->misc) {
amd_northbridges.num = 0;
kfree(nb); return -ENODEV;
}
if (amd_gart_present())
amd_northbridges.flags |= AMD_NB_GART;
if (!cpuid_amd_hygon_has_l3_cache()) return 0;
/* * Some CPU families support L3 Cache Index Disable. There are some * limitations because of E382 and E388 on family 0x10.
*/ if (boot_cpu_data.x86 == 0x10 &&
boot_cpu_data.x86_model >= 0x8 &&
(boot_cpu_data.x86_model > 0x9 ||
boot_cpu_data.x86_stepping >= 0x1))
amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
if (boot_cpu_data.x86 == 0x15)
amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;
/* L3 cache partitioning is supported on family 0x15 */ if (boot_cpu_data.x86 == 0x15)
amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;
return 0;
}
/*
 * Check whether a PCI vendor/device pair belongs to an AMD northbridge
 * misc function, for use during early boot before the PCI core is up.
 *
 * @device: packed identifier with the PCI vendor ID in the low 16 bits
 *          and the PCI device ID in the high 16 bits.
 *
 * Returns true only on AMD/Hygon pre-Zen systems whose IDs match an
 * entry in amd_nb_misc_ids; Zen and newer do not use this path.
 *
 * Ignores subdevice/subvendor but as far as I can figure out
 * they're useless anyways
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (cpu_feature_enabled(X86_FEATURE_ZEN))
		return false;

	/* High half carries the device ID; compare against the known table. */
	device >>= 16;
	for (id = amd_nb_misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return NULL;
/* Assume CPUs from Fam10h have mmconfig, although not all VMs do */ if (boot_cpu_data.x86 < 0x10 ||
rdmsrq_safe(MSR_FAM10H_MMIO_CONF_BASE, &msr)) return NULL;
/* mmconfig is not enabled */ if (!(msr & FAM10H_MMIO_CONF_ENABLE)) return NULL;
base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
int amd_set_subcaches(int cpu, unsignedlong mask)
{ staticunsignedint reset, ban; struct amd_northbridge *nb = node_to_amd_nb(topology_amd_node_id(cpu)); unsignedint reg; int cuid;
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf) return -EINVAL;
/* if necessary, collect reset state of L3 partitioning and BAN mode */ if (reset == 0) {
pci_read_config_dword(nb->link, 0x1d4, &reset);
pci_read_config_dword(nb->misc, 0x1b8, &ban);
ban &= 0x180000;
}
/* deactivate BAN mode if any subcaches are to be disabled */ if (mask != 0xf) {
pci_read_config_dword(nb->misc, 0x1b8, ®);
pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
}
for (i = 0; i != amd_northbridges.num; i++)
pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}
/*
 * Flush the GART TLB on every northbridge in the system.
 *
 * No-op unless the GART feature was detected (AMD_NB_GART). Serialized
 * by a local spinlock so concurrent callers (e.g. AGP vs. IOMMU paths)
 * cannot interleave flush requests on the hardware.
 */
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory it's not needed
	 * but I'm not sure if the hardware won't lose flush requests
	 * when another is pending. This whole thing is so expensive anyways
	 * that it doesn't matter to serialize more. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	/* Kick off the flush on every NB: bit 0 of the 0x9c config word. */
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);
/*
 * NOTE(review): the following German webpage disclaimer is extraneous
 * boilerplate that was accidentally appended to this source file during
 * extraction — it is not C code. Wrapped in a comment so the file stays
 * compilable; it should simply be deleted.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */