/*
 * Build one pci_id_descr table entry: @A is the array of per-device
 * descriptors, @N the number of devices per IMC, @M the number of IMCs
 * per socket, @T the controller type.  The per-socket device count is
 * derived from the array size, so @A must be a real array (not a pointer).
 */
#define PCI_ID_TABLE_ENTRY(A, N, M, T) { \
.descr = A, \
.n_devs_per_imc = N, \
.n_devs_per_sock = ARRAY_SIZE(A), \
.n_imcs_per_sock = M, \
.type = T \
}
/* Knight's Landing Support */
/*
 * KNL's memory channels are swizzled between memory controllers.
 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
 */
#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)

/*
 * Fixed: the device-ID #defines below had been fused onto a single
 * physical line; a #define directive consumes the rest of its line, so
 * only the first macro was actually being defined (with a garbage body).
 */
/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
/* DRAM channel stuff; bank addrs, dimmmtr, etc.. 2-8-2 - 2-9-4 (6 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN     0x7843
/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0     0x782a
/* SAD target - 1-29-1 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1     0x782b
/* Caching / Home Agent */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA      0x782c
/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM    0x7810
/* * KNL differs from SB, IB, and Haswell in that it has multiple * instances of the same device with the same device ID, so we handle that * by creating as many copies in the table as we expect to find. * (Like device ID must be grouped together.)
*/
/*
 * Broadwell support
 *
 * DE processor:
 *	- 1 IMC
 *	- 2 DDR3 channels, 2 DPC per channel
 * EP processor:
 *	- 1 or 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *	- 2 IMC
 *	- 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *	- 2 IMC
 *	- each IMC interfaces with a SMI 2 channel
 *	- each SMI channel interfaces with a scalable memory buffer
 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
/*
 * Fixed: these #defines had been fused onto a single physical line, so
 * only the first one actually took effect (see note on the KNL IDs above
 * in the upstream file); one directive per line restores all of them.
 */
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0      0x6fa0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1      0x6f60
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA   0x6fa8
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM   0x6f71
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA   0x6f68
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM   0x6f79
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0   0x6faf
staticconststruct pci_id_descr pci_dev_descr_broadwell[] = { /* first item must be the HA */
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0, 0, IMC0) },
{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1, 1, IMC1) },
/**************************************************************************** Ancillary status routines
****************************************************************************/
/*
 * Decode the rank count from an MTR register value.
 * Sandy/Ivy Bridge parts support at most 4 ranks; Haswell, Broadwell and
 * Knights Landing support up to 8.
 *
 * Returns the number of ranks, or -EINVAL on an out-of-range encoding.
 * (Fixed: keywords/casts had been fused into invalid tokens such as
 * "staticinlineint".)
 */
static inline int numrank(enum type type, u32 mtr)
{
	int ranks = (1 << RANK_CNT_BITS(mtr));
	int max = 4;

	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
		max = 8;

	if (ranks > max) {
		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
		return -EINVAL;
	}

	return ranks;
}
/*
 * Decode the number of row address bits from an MTR register value.
 * Returns the row count (1 << bits), or -EINVAL when out of range.
 *
 * Fixed: the debug message claimed the valid range was "between 14 and
 * 17", contradicting the "rows < 13 || rows > 18" check it reports on;
 * the message now matches the check.  Fused tokens also repaired.
 */
static inline int numrow(u32 mtr)
{
	int rows = (RANK_WIDTH_BITS(mtr) + 12);

	if (rows < 13 || rows > 18) {
		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}

	return 1 << rows;
}
/*
 * Decode the number of column address bits from an MTR register value.
 * NOTE(review): the "(max = 4)" in the message does not match the
 * "cols > 12" check it reports on — presumably it should read "max = 12";
 * confirm against the upstream driver before changing the string.
 * NOTE(review): the success-path return of this function appears to have
 * been lost in this chunk — the text jumps into an unrelated function body
 * immediately after the error branch below.
 */
staticinlineint numcol(u32 mtr)
{ int cols = (COL_WIDTH_BITS(mtr) + 10);
if (cols > 12) {
edac_dbg(0, "Invalid number of cols: %d (max = 4) raw value = %x (%04x)\n",
cols, (unsignedint)COL_WIDTH_BITS(mtr), mtr); return -EINVAL;
}
/* * If we have devices scattered across several busses that pertain * to the same memory controller, we'll lump them all together.
*/ if (multi_bus) { return list_first_entry_or_null(&sbridge_edac_list, struct sbridge_dev, list);
}
pci_read_config_dword(pvt->pci_ta, MCMTR, ®); if (GET_BITFIELD(reg, 14, 14)) { if (registered)
mtype = MEM_RDDR4; else
mtype = MEM_DDR4;
} else { if (registered)
mtype = MEM_RDDR3; else
mtype = MEM_DDR3;
}
out: return mtype;
}
staticenum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
{ /* for KNL value is fixed */ return DEV_X16;
}
staticenum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{ /* there's no way to figure out */ return DEV_UNKNOWN;
}
staticenum dev_type __ibridge_get_width(u32 mtr)
{ enum dev_type type = DEV_UNKNOWN;
switch (mtr) { case 2:
type = DEV_X16; break; case 1:
type = DEV_X8; break; case 0:
type = DEV_X4; break;
}
return type;
}
staticenum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{ /* * ddr3_width on the documentation but also valid for DDR4 on * Haswell
*/ return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
}
staticenum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
{ /* ddr3_width on the documentation but also valid for DDR4 */ return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
}
staticenum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
{ /* DDR4 RDIMMS and LRDIMMS are supported */ return MEM_RDDR4;
}
/*
 * Use the reporting bank number to determine which memory
 * controller (also known as "ha" for "home agent"). Sandy
 * Bridge only has one memory controller per socket, so the
 * answer is always zero.
 */
static u8 sbridge_get_ha(u8 bank)
{
	return 0;
}
/*
 * On Ivy Bridge, Haswell and Broadwell the error may be in a
 * home agent bank (7, 8), or one of the per-channel memory
 * controller banks (9 .. 16).  Returns 0xff for a bank that does
 * not map to a home agent.
 */
static u8 ibridge_get_ha(u8 bank)
{
	switch (bank) {
	case 7 ... 8:
		return bank - 7;
	case 9 ... 16:
		/* four channel banks per home agent */
		return (bank - 9) / 4;
	default:
		return 0xff;
	}
}
/* Not used, but included for safety/symmetry */
static u8 knl_get_ha(u8 bank)
{
	return 0xff;
}
/* High 16 bits of TAD limit and offset. */ staticconst u32 knl_tad_dram_hi[] = {
0x408, 0x508, 0x608, 0x708,
0x808, 0x908, 0xa08, 0xb08,
};
/* Number of ways a tad entry is interleaved. */ staticconst u32 knl_tad_ways[] = {
8, 6, 4, 3, 2, 1,
};
/*
 * Retrieve the n'th Target Address Decode table entry
 * from the memory controller's TAD table.
 *
 * @pvt:	driver private data
 * @entry:	which entry you want to retrieve
 * @mc:		which memory controller (0 or 1)
 * @offset:	output tad range offset
 * @limit:	output address of first byte above tad range
 * @ways:	output number of interleave ways
 *
 * The offset value has curious semantics.  It's a sort of running total
 * of the sizes of all the memory regions that aren't mapped in this
 * tad table.
 *
 * Returns 0 on success, -EINVAL for a bad @mc, -ENODEV when the entry is
 * disabled or its wayness field is invalid.
 */
static int knl_get_tad(const struct sbridge_pvt *pvt,
		       const int entry,
		       const int mc,
		       u64 *offset,
		       u64 *limit,
		       int *ways)
{
	u32 reg_limit_lo, reg_offset_lo, reg_hi;
	struct pci_dev *pci_mc;
	int way_id;

	switch (mc) {
	case 0:
		pci_mc = pvt->knl.pci_mc0;
		break;
	case 1:
		pci_mc = pvt->knl.pci_mc1;
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	/*
	 * NOTE(review): the three config-space reads below were missing from
	 * this chunk, leaving reg_limit_lo/reg_offset_lo/reg_hi uninitialized
	 * (undefined behavior).  Restored from the per-entry register tables
	 * used by this file (knl_tad_dram_hi above; knl_tad_dram_limit_lo is
	 * referenced by the capacity code) — confirm the offset_lo table name
	 * against the upstream driver.
	 */
	pci_read_config_dword(pci_mc, knl_tad_dram_limit_lo[entry],
			      &reg_limit_lo);
	pci_read_config_dword(pci_mc, knl_tad_dram_offset_lo[entry],
			      &reg_offset_lo);
	pci_read_config_dword(pci_mc, knl_tad_dram_hi[entry], &reg_hi);

	/* Is this TAD entry enabled? */
	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
		return -ENODEV;

	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);

	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
		*ways = knl_tad_ways[way_id];
	} else {
		*ways = 0;
		sbridge_printk(KERN_ERR,
			       "Unexpected value %d in mc_tad_limit_lo wayness field\n",
			       way_id);
		return -ENODEV;
	}

	/*
	 * The least significant 6 bits of base and limit are truncated.
	 * For limit, we fill the missing bits with 1s.
	 */
	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
		  ((u64) GET_BITFIELD(reg_hi, 0, 15) << 32);
	*limit = ((u64) GET_BITFIELD(reg_limit_lo, 6, 31) << 6) | 63 |
		 ((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);

	return 0;
}
/* Determine which memory controller is responsible for a given channel. */
static int knl_channel_mc(int channel)
{
	WARN_ON(channel < 0 || channel >= 6);

	/* MC1 serves channels 0-2, MC0 serves channels 3-5 (see knl_channel_remap). */
	return channel < 3 ? 1 : 0;
}
/*
 * Get the Nth entry from EDC_ROUTE_TABLE register.
 * (This is the per-tile mapping of logical interleave targets to
 * physical EDC modules.)
 *
 * entry 0: 0:2
 *       1: 3:5
 *       2: 6:8
 *       3: 9:11
 *       4: 12:14
 *       5: 15:17
 *       6: 18:20
 *       7: 21:23
 * reserved: 24:31
 */
static u32 knl_get_edc_route(int entry, u32 reg)
{
	WARN_ON(entry >= KNL_MAX_EDCS);
	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
}
/* * Get the Nth entry from MC_ROUTE_TABLE register. * (This is the per-tile mapping of logical interleave targets to * physical DRAM channels modules.) * * entry 0: mc 0:2 channel 18:19 * 1: mc 3:5 channel 20:21 * 2: mc 6:8 channel 22:23 * 3: mc 9:11 channel 24:25 * 4: mc 12:14 channel 26:27 * 5: mc 15:17 channel 28:29 * reserved: 30:31 * * Though we have 3 bits to identify the MC, we should only see * the values 0 or 1.
*/
static u32 knl_get_mc_route(int entry, u32 reg)
{
	int mc, chan;

	/*
	 * NOTE(review): the body of this function was truncated in this
	 * chunk; reconstructed from the bit layout documented in the comment
	 * above (mc in bits entry*3 .. entry*3+2, channel in bits
	 * 18+entry*2 .. 19+entry*2) — confirm against the upstream driver.
	 */
	WARN_ON(entry >= KNL_MAX_CHANNELS);

	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 19);

	/* Undo the MC0/MC1 channel swizzle to get the physical channel. */
	return knl_channel_remap(mc, chan);
}
/*
 * Render the EDC_ROUTE register in human-readable form.
 * Output string s should be at least KNL_MAX_EDCS*2 bytes.
 */
static void knl_show_edc_route(u32 reg, char *s)
{
	int i;

	for (i = 0; i < KNL_MAX_EDCS; i++) {
		s[i*2] = knl_get_edc_route(i, reg) + '0';
		s[i*2+1] = '-';
	}

	/* Overwrite the trailing '-' with the terminator. */
	s[KNL_MAX_EDCS*2 - 1] = '\0';
}
/* * Render the MC_ROUTE register in human-readable form. * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
*/ staticvoid knl_show_mc_route(u32 reg, char *s)
{ int i;
for (i = 0; i < KNL_MAX_CHANNELS; i++) {
s[i*2] = knl_get_mc_route(i, reg) + '0';
s[i*2+1] = '-';
}
/* Is this dram rule backed by regular DRAM in flat mode? */
#define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)
/* Is this dram rule cached? */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
/* Is this rule backed by edc ? */
#define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
/*
 * Is this rule backed by DRAM, cacheable in EDRAM?
 * NOTE(review): KNL_CACHEABLE is defined twice with an identical body
 * (legal in C since the definitions match, but one could be dropped).
 */
#define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
/* Is this rule mod3? */
#define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
/* * Figure out how big our RAM modules are. * * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we * have to figure this out from the SAD rules, interleave lists, route tables, * and TAD rules. * * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to * inspect the TAD rules to figure out how large the SAD regions really are. * * When we know the real size of a SAD region and how many ways it's * interleaved, we know the individual contribution of each channel to * TAD is size/ways. * * Finally, we have to check whether each channel participates in each SAD * region. * * Fortunately, KNL only supports one DIMM per channel, so once we know how * much memory the channel uses, we know the DIMM is at least that large. * (The BIOS might possibly choose not to map all available memory, in which * case we will underreport the size of the DIMM.) * * In theory, we could try to determine the EDC sizes as well, but that would * only work in flat mode, not in cache mode. * * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS * elements)
*/ staticint knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
{
u64 sad_base, sad_limit = 0;
u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace; int sad_rule = 0; int tad_rule = 0; int intrlv_ways, tad_ways;
u32 first_pkg, pkg; int i;
u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
u32 dram_rule, interleave_reg;
u32 mc_route_reg[KNL_MAX_CHAS];
u32 edc_route_reg[KNL_MAX_CHAS]; int edram_only; char edc_route_string[KNL_MAX_EDCS*2]; char mc_route_string[KNL_MAX_CHANNELS*2]; int cur_reg_start; int mc; int channel; int participants[KNL_MAX_CHANNELS];
for (i = 0; i < KNL_MAX_CHANNELS; i++)
mc_sizes[i] = 0;
/* Read the EDC route table in each CHA. */
cur_reg_start = 0; for (i = 0; i < KNL_MAX_CHAS; i++) {
pci_read_config_dword(pvt->knl.pci_cha[i],
KNL_EDC_ROUTE, &edc_route_reg[i]);
if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
knl_show_edc_route(edc_route_reg[i-1],
edc_route_string); if (cur_reg_start == i-1)
edac_dbg(0, "edc route table for CHA %d: %s\n",
cur_reg_start, edc_route_string); else
edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
cur_reg_start, i-1, edc_route_string);
cur_reg_start = i;
}
}
knl_show_edc_route(edc_route_reg[i-1], edc_route_string); if (cur_reg_start == i-1)
edac_dbg(0, "edc route table for CHA %d: %s\n",
cur_reg_start, edc_route_string); else
edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
cur_reg_start, i-1, edc_route_string);
/* Read the MC route table in each CHA. */
cur_reg_start = 0; for (i = 0; i < KNL_MAX_CHAS; i++) {
pci_read_config_dword(pvt->knl.pci_cha[i],
KNL_MC_ROUTE, &mc_route_reg[i]);
if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
knl_show_mc_route(mc_route_reg[i-1], mc_route_string); if (cur_reg_start == i-1)
edac_dbg(0, "mc route table for CHA %d: %s\n",
cur_reg_start, mc_route_string); else
edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
cur_reg_start, i-1, mc_route_string);
cur_reg_start = i;
}
}
knl_show_mc_route(mc_route_reg[i-1], mc_route_string); if (cur_reg_start == i-1)
edac_dbg(0, "mc route table for CHA %d: %s\n",
cur_reg_start, mc_route_string); else
edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
cur_reg_start, i-1, mc_route_string);
/* Process DRAM rules */ for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) { /* previous limit becomes the new base */
sad_base = sad_limit;
/* * Find out how many ways this dram rule is interleaved. * We stop when we see the first channel again.
*/
first_pkg = sad_pkg(pvt->info.interleave_pkg,
interleave_reg, 0); for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
pkg = sad_pkg(pvt->info.interleave_pkg,
interleave_reg, intrlv_ways);
if ((pkg & 0x8) == 0) { /* * 0 bit means memory is non-local, * which KNL doesn't support
*/
edac_dbg(0, "Unexpected interleave target %d\n",
pkg); return -1;
}
if (pkg == first_pkg) break;
} if (KNL_MOD3(dram_rule))
intrlv_ways *= 3;
/* * Find out how big the SAD region really is by iterating * over TAD tables (SAD regions may contain holes). * Each memory controller might have a different TAD table, so * we have to look at both. * * Livespace is the memory that's mapped in this TAD table, * deadspace is the holes (this could be the MMIO hole, or it * could be memory that's mapped by the other TAD table but * not this one).
*/ for (mc = 0; mc < 2; mc++) {
sad_actual_size[mc] = 0;
tad_livespace = 0; for (tad_rule = 0;
tad_rule < ARRAY_SIZE(
knl_tad_dram_limit_lo);
tad_rule++) { if (knl_get_tad(pvt,
tad_rule,
mc,
&tad_deadspace,
&tad_limit,
&tad_ways)) break;
if (tad_base < sad_base) { if (tad_limit > sad_base)
edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
} elseif (tad_base < sad_limit) { if (tad_limit+1 > sad_limit) {
edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
} else { /* TAD region is completely inside SAD region */
edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
tad_rule, tad_base,
tad_limit, tad_size,
mc);
sad_actual_size[mc] += tad_size;
}
}
}
}
for (mc = 0; mc < 2; mc++) {
edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
mc, sad_actual_size[mc], sad_actual_size[mc]);
}
/* Ignore EDRAM rule */ if (edram_only) continue;
/* Figure out which channels participate in interleave. */ for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
participants[channel] = 0;
/* For each channel, does at least one CHA have * this channel mapped to the given target?
*/ for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) { int target; int cha;
for (target = 0; target < KNL_MAX_CHANNELS; target++) { for (cha = 0; cha < KNL_MAX_CHAS; cha++) { if (knl_get_mc_route(target,
mc_route_reg[cha]) == channel
&& !participants[channel]) {
participants[channel] = 1; break;
}
}
}
}
for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
mc = knl_channel_mc(channel); if (participants[channel]) {
edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
channel,
sad_actual_size[mc]/intrlv_ways,
sad_rule);
mc_sizes[channel] +=
sad_actual_size[mc]/intrlv_ways;
}
}
}
for (i = 0; i < channels; i++) {
u32 mtr, amap = 0;
int max_dimms_per_channel;
if (pvt->info.type == KNIGHTS_LANDING) {
max_dimms_per_channel = 1; if (!pvt->knl.pci_channel[i]) continue;
} else {
max_dimms_per_channel = ARRAY_SIZE(mtr_regs); if (!pvt->pci_tad[i]) continue;
pci_read_config_dword(pvt->pci_tad[i], 0x8c, &amap);
}
for (j = 0; j < max_dimms_per_channel; j++) {
dimm = edac_get_dimm(mci, i, j, 0); if (pvt->info.type == KNIGHTS_LANDING) {
pci_read_config_dword(pvt->knl.pci_channel[i],
knl_mtr_reg, &mtr);
} else {
pci_read_config_dword(pvt->pci_tad[i],
mtr_regs[j], &mtr);
}
edac_dbg(4, "Channel #%d MTR%d = %x\n", i, j, mtr);
if (IS_DIMM_PRESENT(mtr)) { if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
pvt->sbridge_dev->source_id,
pvt->sbridge_dev->dom, i); return -ENODEV;
}
pvt->channel[i].dimms++;
ranks = numrank(pvt->info.type, mtr);
if (pvt->info.type == KNIGHTS_LANDING) { /* For DDR4, this is fixed. */
cols = 1 << 10;
rows = knl_mc_sizes[i] /
((u64) cols * ranks * banks * 8);
} else {
rows = numrow(mtr);
cols = numcol(mtr);
}
/* * Step 2) Get SAD range and SAD Interleave list * TAD registers contain the interleave wayness. However, it * seems simpler to just discover it indirectly, with the * algorithm bellow.
*/
prv = 0; for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) { /* SAD_LIMIT Address range is 45:26 */
pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
®);
limit = pvt->info.sad_limit(reg);
/* * Step 4) Get TAD offsets, per each channel
*/ for (i = 0; i < NUM_CHANNELS; i++) { if (!pvt->channel[i].dimms) continue; for (j = 0; j < n_tads; j++) {
pci_read_config_dword(pvt->pci_tad[i],
tad_ch_nilv_offset[j],
®);
tmp_mb = TAD_OFFSET(reg) >> 20;
gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
i, j,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
reg);
}
}
/* * Step 6) Get RIR Wayness/Limit, per each channel
*/ for (i = 0; i < NUM_CHANNELS; i++) { if (!pvt->channel[i].dimms) continue; for (j = 0; j < MAX_RIR_RANGES; j++) {
pci_read_config_dword(pvt->pci_tad[i],
rir_way_limit[j],
®);
/*
 * Gather @nbits bits of @addr, selected by the bit positions in @bits,
 * into a compact integer: bit i of the result is bit bits[i] of @addr.
 */
static int sb_bits(u64 addr, int nbits, u8 *bits)
{
	int i, res = 0;

	for (i = 0; i < nbits; i++)
		res |= ((addr >> bits[i]) & 1) << i;

	return res;
}
/*
 * Build a 2-bit bank number from address bits @b0/@b1, optionally
 * XOR-hashed with address bits @x0/@x1 when @do_xor is set.
 */
static int sb_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
{
	int ret = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);

	if (do_xor)
		ret ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);

	/*
	 * NOTE(review): the return statement and closing brace were missing
	 * in this chunk; restored (the accumulated value is clearly the
	 * function's result).
	 */
	return ret;
}
/* * Step 0) Check if the address is at special memory ranges * The check bellow is probably enough to fill all cases where * the error is not inside a memory, except for the legacy * range (e. g. VGA addresses). It is unlikely, however, that the * memory controller would generate an error on that range.
*/ if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr); return -EINVAL;
} if (addr >= (u64)pvt->tohm) {
sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr); return -EINVAL;
}
/* * Step 1) Get socket
*/ for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
®);
if (!DRAM_RULE_ENABLE(reg)) continue;
limit = pvt->info.sad_limit(reg); if (limit <= prv) {
sprintf(msg, "Can't discover the memory socket"); return -EINVAL;
} if (addr <= limit) break;
prv = limit;
} if (n_sads == pvt->info.max_sad) {
sprintf(msg, "Can't discover the memory socket"); return -EINVAL;
}
dram_rule = reg;
*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
interleave_mode = pvt->info.interleave_mode(dram_rule);
/* * Move to the proper node structure, in order to access the * right PCI registers
*/
new_mci = get_mci_for_node_id(*socket, sad_ha); if (!new_mci) {
sprintf(msg, "Struct for socket #%u wasn't initialized",
*socket); return -EINVAL;
}
mci = new_mci;
pvt = mci->pvt_info;
/* * Step 2) Get memory channel
*/
prv = 0;
pci_ha = pvt->pci_ha; for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], ®);
limit = TAD_LIMIT(reg); if (limit <= prv) {
sprintf(msg, "Can't discover the memory channel"); return -EINVAL;
} if (addr <= limit) break;
prv = limit;
} if (n_tads == MAX_TAD) {
sprintf(msg, "Can't discover the memory channel"); return -EINVAL;
}
/*
 * NOTE(review): the German text below is a website disclaimer that leaked
 * into this file during extraction ("the information on this web page was
 * compiled carefully to the best of our knowledge; however neither
 * completeness, correctness, nor quality of the information provided is
 * guaranteed.  Remark: the syntax colouring and the measurement are still
 * experimental").  It is not part of the driver; it is fenced inside a
 * comment so it cannot break compilation.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */