/* * If the core hasn't done any FTLB configuration, there is nothing * for us to do here.
*/ if (!mips_has_ftlb_configured) return 1;
/* Disable it in the boot cpu */ if (set_ftlb_enable(&cpu_data[0], 0)) {
pr_warn("Can't turn FTLB off\n"); return 1;
}
config4 = read_c0_config4();
/* Check that FTLB has been disabled */
mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; /* MMUSIZEEXT == VTLB ON, FTLB OFF */ if (mmuextdef == MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT) { /* This should never happen */
pr_warn("FTLB could not be disabled!\n"); return 1;
}
/* * noftlb is mainly used for debug purposes so print * an informative message instead of using pr_debug()
*/
pr_info("FTLB has been disabled\n");
/* * Some of these bits are duplicated in the decode_config4. * MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT is the only possible case * once FTLB has been disabled so undo what decode_config4 did.
*/
cpu_data[0].tlbsize -= cpu_data[0].tlbsizeftlbways *
cpu_data[0].tlbsizeftlbsets;
cpu_data[0].tlbsizeftlbsets = 0;
cpu_data[0].tlbsizeftlbways = 0;
return 1;
}
__setup("noftlb", ftlb_disable);
/*
 * Check if the CPU has per-TC performance counters.
 *
 * On MTI multi-threading cores, Config7.PTC indicates that each TC has
 * its own set of performance counters rather than sharing per-core ones.
 * Record that capability in the cpuinfo options so perf code can use it.
 */
static inline void cpu_set_mt_per_tc_perf(struct cpuinfo_mips *c)
{
	if (read_c0_config7() & MTI_CONF7_PTC)
		c->options |= MIPS_CPU_MT_PER_TC_PERF_COUNTERS;
}
switch (current_cpu_type()) { case CPU_34K: /* * Erratum "RPS May Cause Incorrect Instruction Execution" * This code only handles VPE0, any SMP/RTOS code * making use of VPE1 will be responsible for that VPE.
*/ if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS); break; default: break;
}
}
/* * Probe whether cpu has config register by trying to play with * alternate cache bit and see whether it matters. * It's used by cpu_probe to distinguish between R3000A and R3081.
*/ staticinlineint cpu_has_confreg(void)
{ #ifdef CONFIG_CPU_R3000 unsignedlong size1, size2; unsignedlong cfg = read_c0_conf();
unsignedint probability = c->tlbsize / c->tlbsizevtlb;
/* * 0 = All TLBWR instructions go to FTLB * 1 = 15:1: For every 16 TBLWR instructions, 15 go to the * FTLB and 1 goes to the VTLB. * 2 = 7:1: As above with 7:1 ratio. * 3 = 3:1: As above with 3:1 ratio. * * Use the linear midpoint as the probability threshold.
*/ if (probability >= 12) return 1; elseif (probability >= 6) return 2; else /* * So FTLB is less than 4 times bigger than VTLB. * A 3:1 ratio can still be useful though.
*/ return 3;
}
/* It's implementation dependent how the FTLB can be enabled */ switch (c->cputype) { case CPU_PROAPTIV: case CPU_P5600: case CPU_P6600: /* proAptiv & related cores use Config6 to enable the FTLB */
config = read_c0_config6();
config0 = read_c0_config();
mm = config0 & MIPS_CONF_MM;
/* * It's implementation dependent what type of write-merge is supported * and whether it can be enabled/disabled. If it is settable lets make * the merging allowed by default. Some platforms might have * write-through caching unsupported. In this case just ignore the * CP0.Config.MM bit field value.
*/ switch (c->cputype) { case CPU_24K: case CPU_34K: case CPU_74K: case CPU_P5600: case CPU_P6600:
c->options |= MIPS_CPU_MM_FULL;
update = MIPS_CONF_MM_FULL; break; case CPU_1004K: case CPU_1074K: case CPU_INTERAPTIV: case CPU_PROAPTIV:
mm = 0;
fallthrough; default:
update = 0; break;
}
if (cpu_has_tlb) { if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
c->options |= MIPS_CPU_TLBINV;
/* * R6 has dropped the MMUExtDef field from config4. * On R6 the fields always describe the FTLB, and only if it is * present according to Config.MT.
*/ if (!cpu_has_mips_r6)
mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; elseif (cpu_has_ftlb)
mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; else
mmuextdef = 0;
/* * Warn if the computed ASID mask doesn't match the mask the kernel * is built for. This may indicate either a serious problem or an * easy optimisation opportunity, but either way should be addressed.
*/
WARN_ON(asid_mask != cpu_asid_mask(c));
if (cpu_has_mips_r6) { if (!mmid_disabled_quirk && (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid))
config5 |= MIPS_CONF5_MI; else
config5 &= ~MIPS_CONF5_MI;
}
write_c0_config5(config5);
if (config5 & MIPS_CONF5_EVA)
c->options |= MIPS_CPU_EVA; if (config5 & MIPS_CONF5_MRP)
c->options |= MIPS_CPU_MAAR; if (config5 & MIPS_CONF5_LLB)
c->options |= MIPS_CPU_RW_LLB; if (config5 & MIPS_CONF5_MVH)
c->options |= MIPS_CPU_MVH; if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
c->options |= MIPS_CPU_VP; if (config5 & MIPS_CONF5_CA2)
c->ases |= MIPS_ASE_MIPS16E2;
if (config5 & MIPS_CONF5_CRCP)
elf_hwcap |= HWCAP_MIPS_CRC32;
if (cpu_has_mips_r6) { /* Ensure the write to config5 above takes effect */
back_to_back_c0_hazard();
/* Check whether we successfully enabled MMID support */
config5 = read_c0_config5(); if (config5 & MIPS_CONF5_MI)
c->options |= MIPS_CPU_MMID;
/* * Warn if we've hardcoded cpu_has_mmid to a value unsuitable * for the CPU we're running on, or if CPUs in an SMP system * have inconsistent MMID support.
*/
WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI));
if (cpu_has_mmid) {
write_c0_memorymapid(~0ul);
back_to_back_c0_hazard();
asid_mask = read_c0_memorymapid();
/* * We maintain a bitmap to track MMID allocation, and * need a sensible upper bound on the size of that * bitmap. The initial CPU with MMID support (I6500) * supports 16 bit MMIDs, which gives us an 8KiB * bitmap. The architecture recommends that hardware * support 32 bit MMIDs, which would give us a 512MiB * bitmap - that's too big in most cases. * * Cap MMID width at 16 bits for now & we can revisit * this if & when hardware supports anything wider.
*/
max_mmid_width = 16; if (asid_mask > GENMASK(max_mmid_width - 1, 0)) {
pr_info("Capping MMID width at %d bits",
max_mmid_width);
asid_mask = GENMASK(max_mmid_width - 1, 0);
}
set_cpu_asid_mask(c, asid_mask);
}
}
return config5 & MIPS_CONF_M;
}
staticvoid decode_configs(struct cpuinfo_mips *c)
{ int ok;
/* Enable FTLB if present and not disabled */
set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);
ok = decode_config0(c); /* Read Config registers. */
BUG_ON(!ok); /* Arch spec violation! */ if (ok)
ok = decode_config1(c); if (ok)
ok = decode_config2(c); if (ok)
ok = decode_config3(c); if (ok)
ok = decode_config4(c); if (ok)
ok = decode_config5(c);
/* Probe the EBase.WG bit */ if (cpu_has_mips_r2_r6) {
u64 ebase; unsignedint status;
/* {read,write}_c0_ebase_64() may be UNDEFINED prior to r6 */
ebase = cpu_has_mips64r6 ? read_c0_ebase_64()
: (s32)read_c0_ebase(); if (ebase & MIPS_EBASE_WG) { /* WG bit already set, we can avoid the clumsy probe */
c->options |= MIPS_CPU_EBASE_WG;
} else { /* Its UNDEFINED to change EBase while BEV=0 */
status = read_c0_status();
write_c0_status(status | ST0_BEV);
irq_enable_hazard(); /* * On pre-r6 cores, this may well clobber the upper bits * of EBase. This is hard to avoid without potentially * hitting UNDEFINED dm*c0 behaviour if EBase is 32-bit.
*/ if (cpu_has_mips64r6)
write_c0_ebase_64(ebase | MIPS_EBASE_WG); else
write_c0_ebase(ebase | MIPS_EBASE_WG);
back_to_back_c0_hazard(); /* Restore BEV */
write_c0_status(status); if (read_c0_ebase() & MIPS_EBASE_WG) {
c->options |= MIPS_CPU_EBASE_WG;
write_c0_ebase(ebase);
}
}
}
/* configure the FTLB write probability */
set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
mips_probe_watch_registers(c);
#ifndef CONFIG_MIPS_CPS if (cpu_has_mips_r2_r6) { unsignedint core;
/*
 * Probe which bits of a guest CP0 config register are implemented: save
 * the current value, attempt to set the bits of interest, read back what
 * stuck into maxconf, then restore the saved value.
 */
#define probe_gc0_config(name, maxconf, bits)				\
do {									\
	unsigned int tmp;						\
	tmp = read_gc0_##name();					\
	write_gc0_##name(tmp | (bits));					\
	back_to_back_c0_hazard();					\
	maxconf = read_gc0_##name();					\
	write_gc0_##name(tmp);						\
} while (0)
/* * Probe for dynamic guest capabilities by changing certain config bits and * reading back to see if they change. Finally write back the original value.
/*
 * Probe which bits of a guest CP0 config register are writable (dynamic):
 * toggle the bits of interest, read back to see which ones changed into
 * dynconf, restore the original value, and fold the dynamic bits into
 * maxconf so it reflects everything the register can express.
 */
#define probe_gc0_config_dyn(name, maxconf, dynconf, bits)		\
do {									\
	maxconf = read_gc0_##name();					\
	write_gc0_##name(maxconf ^ (bits));				\
	back_to_back_c0_hazard();					\
	dynconf = maxconf ^ read_gc0_##name();				\
	write_gc0_##name(maxconf);					\
	maxconf |= dynconf;						\
} while (0)
ok = decode_guest_config0(c); if (ok)
ok = decode_guest_config1(c); if (ok)
ok = decode_guest_config2(c); if (ok)
ok = decode_guest_config3(c); if (ok)
ok = decode_guest_config4(c); if (ok)
decode_guest_config5(c);
}
if (guestctl0 & MIPS_GCTL0_G0E)
c->options |= MIPS_CPU_GUESTCTL0EXT; if (guestctl0 & MIPS_GCTL0_G1)
c->options |= MIPS_CPU_GUESTCTL1; if (guestctl0 & MIPS_GCTL0_G2)
c->options |= MIPS_CPU_GUESTCTL2; if (!(guestctl0 & MIPS_GCTL0_RAD)) {
c->options |= MIPS_CPU_GUESTID;
/* * Probe for Direct Root to Guest (DRG). Set GuestCtl1.RID = 0 * first, otherwise all data accesses will be fully virtualised * as if they were performed by guest mode.
*/
write_c0_guestctl1(0);
tlbw_use_hazard();
/*
 * Determine the width of the GuestID field: write the all-ones pattern to
 * GuestCtl1.ID, read back which bits stuck, and record the resulting mask.
 * GuestCtl1 is restored to 0 afterwards so no stray GuestID is left active.
 */
static inline void cpu_probe_guestctl1(struct cpuinfo_mips *c)
{
	if (cpu_has_guestid) {
		/* determine the number of bits of GuestID available */
		write_c0_guestctl1(MIPS_GCTL1_ID);
		back_to_back_c0_hazard();
		c->guestid_mask = (read_c0_guestctl1() & MIPS_GCTL1_ID)
						>> MIPS_GCTL1_ID_SHIFT;
		write_c0_guestctl1(0);
	}
}
/*
 * Determine the implemented width of the guest timer offset register:
 * write all ones to GTOffset, read back which bits stuck, and record the
 * mask. GTOffset is restored to 0 afterwards.
 */
static inline void cpu_probe_gtoffset(struct cpuinfo_mips *c)
{
	/* determine the number of bits of GTOffset available */
	write_c0_gtoffset(0xffffffff);
	back_to_back_c0_hazard();
	c->gtoffset_mask = read_c0_gtoffset();
	write_c0_gtoffset(0);
}
staticinlinevoid cpu_probe_vz(struct cpuinfo_mips *c)
{
cpu_probe_guestctl0(c); if (cpu_has_guestctl1)
cpu_probe_guestctl1(c);
/* * SC and MC versions can't be reliably told apart, * but only the latter support coherent caching * modes so assume the firmware has set the KSEG0 * coherency attribute reasonably (if uncached, we * assume SC).
*/ switch (cca) { case CONF_CM_CACHABLE_CE: case CONF_CM_CACHABLE_COW: case CONF_CM_CACHABLE_CUW:
mc = 1; break; default:
mc = 0; break;
} if ((c->processor_id & PRID_REV_MASK) >=
PRID_REV_R4400) {
c->cputype = mc ? CPU_R4400MC : CPU_R4400SC;
__cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
} else {
c->cputype = mc ? CPU_R4000MC : CPU_R4000SC;
__cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
}
}
switch (__get_cpu_type(c->cputype)) { case CPU_M5150: case CPU_P5600:
set_isa(c, MIPS_CPU_ISA_M32R5); break; case CPU_I6500:
c->options |= MIPS_CPU_SHARED_FTLB_ENTRIES;
fallthrough; case CPU_I6400:
c->options |= MIPS_CPU_SHARED_FTLB_RAM;
fallthrough; default: break;
}
/* Recent MIPS cores use the implementation-dependent ExcCode 16 for * cache/FTLB parity exceptions.
*/ switch (__get_cpu_type(c->cputype)) { case CPU_PROAPTIV: case CPU_P5600: case CPU_P6600: case CPU_I6400: case CPU_I6500:
c->options |= MIPS_CPU_FTLBPAREX; break;
}
}
staticinlinevoid cpu_probe_alchemy(struct cpuinfo_mips *c, unsignedint cpu)
{
decode_configs(c); switch (c->processor_id & PRID_IMP_MASK) { case PRID_IMP_AU1_REV1: case PRID_IMP_AU1_REV2:
c->cputype = CPU_ALCHEMY; switch ((c->processor_id >> 24) & 0xff) { case 0:
__cpu_name[cpu] = "Au1000"; break; case 1:
__cpu_name[cpu] = "Au1500"; break; case 2:
__cpu_name[cpu] = "Au1100"; break; case 3:
__cpu_name[cpu] = "Au1550"; break; case 4:
__cpu_name[cpu] = "Au1200"; if ((c->processor_id & PRID_REV_MASK) == 2)
__cpu_name[cpu] = "Au1250"; break; case 5:
__cpu_name[cpu] = "Au1210"; break; default:
__cpu_name[cpu] = "Au1xxx"; break;
} break; case PRID_IMP_NETLOGIC_AU13XX:
c->cputype = CPU_ALCHEMY;
__cpu_name[cpu] = "Au1300"; break;
}
}
/* * XBurst misses a config2 register, so config3 decode was skipped in * decode_configs().
*/
decode_config3(c);
/* XBurst does not implement the CP0 counter. */
c->options &= ~MIPS_CPU_COUNTER;
BUG_ON(__builtin_constant_p(cpu_has_counter) && cpu_has_counter);
/* XBurst has virtually tagged icache */
c->icache.flags |= MIPS_CACHE_VTAG;
switch (c->processor_id & PRID_IMP_MASK) {
/* XBurst®1 with MXU1.0/MXU1.1 SIMD ISA */ case PRID_IMP_XBURST_REV1:
/* * The XBurst core by default attempts to avoid branch target * buffer lookups by detecting & special casing loops. This * feature will cause BogoMIPS and lpj calculate in error. * Set cp0 config7 bit 4 to disable this feature.
*/
set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
switch (c->processor_id & PRID_COMP_MASK) {
/* * The config0 register in the XBurst CPUs with a processor ID of * PRID_COMP_INGENIC_D0 report themselves as MIPS32r2 compatible, * but they don't actually support this ISA.
*/ case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
/* FPU is not properly detected on JZ4760(B). */ if (c->processor_id == 0x2ed0024f)
c->options |= MIPS_CPU_FPU;
fallthrough;
/* * The config0 register in the XBurst CPUs with a processor ID of * PRID_COMP_INGENIC_D0 or PRID_COMP_INGENIC_D1 has an abandoned * huge page tlb mode, this mode is not compatible with the MIPS * standard, it will cause tlbmiss and into an infinite loop * (line 21 in the tlb-funcs.S) when starting the init process. * After chip reset, the default is HPTLB mode, Write 0xa9000000 * to cp0 register 5 sel 4 to switch back to VTLB mode to prevent * getting stuck.
*/ case PRID_COMP_INGENIC_D1:
write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS); break;
default: break;
}
fallthrough;
/* XBurst®1 with MXU2.0 SIMD ISA */ case PRID_IMP_XBURST_REV2: /* Ingenic uses the WA bit to achieve write-combine memory writes */
c->writecombine = _CACHE_CACHABLE_WA;
c->cputype = CPU_XBURST;
__cpu_name[cpu] = "Ingenic XBurst"; break;
/* XBurst®2 with MXU2.1 SIMD ISA */ case PRID_IMP_XBURST2:
c->cputype = CPU_XBURST;
__cpu_name[cpu] = "Ingenic XBurst II"; break;
c->processor_id = read_c0_prid(); switch (c->processor_id & PRID_COMP_MASK) { case PRID_COMP_LEGACY:
cpu_probe_legacy(c, cpu); break; case PRID_COMP_MIPS:
cpu_probe_mips(c, cpu); break; case PRID_COMP_ALCHEMY: case PRID_COMP_NETLOGIC:
cpu_probe_alchemy(c, cpu); break; case PRID_COMP_SIBYTE:
cpu_probe_sibyte(c, cpu); break; case PRID_COMP_BROADCOM:
cpu_probe_broadcom(c, cpu); break; case PRID_COMP_SANDCRAFT:
cpu_probe_sandcraft(c, cpu); break; case PRID_COMP_NXP:
cpu_probe_nxp(c, cpu); break; case PRID_COMP_CAVIUM:
cpu_probe_cavium(c, cpu); break; case PRID_COMP_LOONGSON:
cpu_probe_loongson(c, cpu); break; case PRID_COMP_INGENIC_13: case PRID_COMP_INGENIC_D0: case PRID_COMP_INGENIC_D1: case PRID_COMP_INGENIC_E1:
cpu_probe_ingenic(c, cpu); break;
}
/* * Platform code can force the cpu type to optimize code * generation. In that case be sure the cpu type is correctly * manually setup otherwise it could trigger some nasty bugs.
*/
BUG_ON(current_cpu_type() != c->cputype);
if (cpu_has_rixi) { /* Enable the RIXI exceptions */
set_c0_pagegrain(PG_IEC);
back_to_back_c0_hazard(); /* Verify the IEC bit is set */ if (read_c0_pagegrain() & PG_IEC)
c->options |= MIPS_CPU_RIXIEX;
}
if (mips_fpu_disabled)
c->options &= ~MIPS_CPU_FPU;
if (mips_dsp_disabled)
c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
if (cpu_has_mips16)
elf_hwcap |= HWCAP_MIPS_MIPS16;
if (cpu_has_mdmx)
elf_hwcap |= HWCAP_MIPS_MDMX;
if (cpu_has_mips3d)
elf_hwcap |= HWCAP_MIPS_MIPS3D;
if (cpu_has_smartmips)
elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
if (cpu_has_dsp)
elf_hwcap |= HWCAP_MIPS_DSP;
if (cpu_has_dsp2)
elf_hwcap |= HWCAP_MIPS_DSP2;
if (cpu_has_dsp3)
elf_hwcap |= HWCAP_MIPS_DSP3;
if (cpu_has_mips16e2)
elf_hwcap |= HWCAP_MIPS_MIPS16E2;
if (cpu_has_loongson_mmi)
elf_hwcap |= HWCAP_LOONGSON_MMI;
if (cpu_has_loongson_ext)
elf_hwcap |= HWCAP_LOONGSON_EXT;
if (cpu_has_loongson_ext2)
elf_hwcap |= HWCAP_LOONGSON_EXT2;
if (cpu_has_vz)
cpu_probe_vz(c);
cpu_probe_vmbits(c);
/* Synthesize CPUCFG data if running on Loongson processors; * no-op otherwise. * * This looks at previously probed features, so keep this at bottom.
*/
loongson3_cpucfg_synthesize_data(c);
pr_info("CPU%d revision is: %08x (%s)\n",
smp_processor_id(), c->processor_id, cpu_name_string()); if (c->options & MIPS_CPU_FPU)
printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id); if (cpu_has_msa)
pr_info("MSA revision is: %08x\n", c->msa_id);
}
void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsignedint cluster)
{ /* Ensure the core number fits in the field */
WARN_ON(cluster > (MIPS_GLOBALNUMBER_CLUSTER >>
MIPS_GLOBALNUMBER_CLUSTER_SHF));
void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsignedint core)
{ /* Ensure the core number fits in the field */
WARN_ON(core > (MIPS_GLOBALNUMBER_CORE >> MIPS_GLOBALNUMBER_CORE_SHF));
void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsignedint vpe)
{ /* Ensure the VP(E) ID fits in the field */
WARN_ON(vpe > (MIPS_GLOBALNUMBER_VP >> MIPS_GLOBALNUMBER_VP_SHF));
/* Ensure we're not using VP(E)s without support */
WARN_ON(vpe && !IS_ENABLED(CONFIG_MIPS_MT_SMP) &&
!IS_ENABLED(CONFIG_CPU_MIPSR6));
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.