// SPDX-License-Identifier: GPL-2.0
/*
 * Page Deallocation Table (PDT) support
 *
 * The Page Deallocation Table (PDT) is maintained by firmware and holds a
 * list of memory addresses in which memory errors were detected.
 * The list contains both single-bit (correctable) and double-bit
 * (uncorrectable) errors.
 *
 * Copyright 2017 by Helge Deller <deller@gmx.de>
 *
 * possible future enhancements:
 * - add userspace interface via procfs or sysfs to clear PDT
 */
/*
 * Constants for the pdt_entry format:
 * A pdt_entry holds the physical address in bits 0-57, bits 58-61 are
 * reserved, bit 62 is the perm bit and bit 63 is the error_type bit.
 * The perm bit indicates whether the error has been verified as a permanent
 * error (value of 1) or has not been verified, and may be transient (value
 * of 0). The error_type bit indicates whether the error is a single bit error
 * (value of 1) or a multiple bit error.
 * On non-PAT machines phys_addr is encoded in bits 0-59 and error_type in bit
 * 63. Those machines don't provide the perm bit.
 */
/*
 * get_info_pat_cell - query PDT info via the per-cell PAT firmware call.
 *
 * Asks firmware for this cell's PDT state.  On non-PAT machines the call
 * is not available, so PDC_BAD_PROC is returned and the caller falls back
 * to the legacy PDC interface.
 */
static int get_info_pat_cell(void)
{
	struct pdc_pat_mem_cell_pdt_retinfo cell_rinfo;
	int ret;

	/* older PAT machines like rp5470 report cell info only */
	if (is_pdc_pat())
		ret = pdc_pat_mem_pdt_cell_info(&cell_rinfo, parisc_cell_num);
	else
		return PDC_BAD_PROC;

	/*
	 * NOTE(review): SOURCE is truncated here — the remainder of this
	 * function (presumably copying cell_rinfo into pdt_status and
	 * returning ret) and its closing brace are missing from this chunk
	 * and must be restored from the original file.
	 */
/*
 * pdc_pdt_init()
 *
 * Initialize kernel PDT structures, read initial PDT table from firmware,
 * report all current PDT entries and mark bad memory with memblock_reserve()
 * to avoid that the kernel will use broken memory areas.
 */
void __init pdc_pdt_init(void)
{
	int ret, i;
	unsigned long entries;
	struct pdc_mem_read_pdt pdt_read_ret;

	/*
	 * Probe the firmware interfaces from newest to oldest: the new PAT
	 * call first, then the per-cell PAT call, and finally the standard
	 * PDC call.  pdt_type records which interface answered so the rest
	 * of the driver keeps using the same one.
	 */
	pdt_type = PDT_PAT_NEW;
	ret = get_info_pat_new();

	if (ret != PDC_OK) {
		pdt_type = PDT_PAT_CELL;
		ret = get_info_pat_cell();
	}

	if (ret != PDC_OK) {
		pdt_type = PDT_PDC; /* non-PAT machines provide the standard PDC call */
		ret = pdc_mem_pdt_info(&pdt_status);
	}

	if (ret != PDC_OK) {
		pdt_type = PDT_NONE;
		pr_info("PDT: Firmware does not provide any page deallocation"
			" information.\n");
		return;
	}

	/*
	 * NOTE(review): SOURCE is truncated here — the rest of this function
	 * (reading, reporting and reserving the initial PDT entries) and its
	 * closing brace are missing from this chunk and must be restored
	 * from the original file.
	 */
schedule_timeout(pdt_poll_interval); if (kthread_should_stop()) break;
/* Do we have new PDT entries? */ switch (pdt_type) { case PDT_PAT_NEW:
ret = get_info_pat_new(); break; case PDT_PAT_CELL:
ret = get_info_pat_cell(); break; default:
ret = pdc_mem_pdt_info(&pdt_status); break;
}
/* if no new PDT entries, just wait again */
num = pdt_status.pdt_entries - old_num_entries; if (num <= 0) continue;
/* decrease poll interval in case we found memory errors */ if (pdt_status.pdt_entries &&
pdt_poll_interval == PDT_POLL_INTERVAL_DEFAULT)
pdt_poll_interval = PDT_POLL_INTERVAL_SHORT;
/* limit entries to get */ if (num > MAX_PDT_ENTRIES) {
num = MAX_PDT_ENTRIES;
pdt_status.pdt_entries = old_num_entries + num;
}
/* get new entries */ switch (pdt_type) { #ifdef CONFIG_64BIT case PDT_PAT_CELL: if (pdt_status.pdt_entries > MAX_PDT_ENTRIES) {
pr_crit("PDT: too many entries.\n"); return -ENOMEM;
}
ret = pdc_pat_mem_read_cell_pdt(&pat_pret, pdt_entry,
MAX_PDT_ENTRIES);
bad_mem_ptr = &pdt_entry[old_num_entries]; break; case PDT_PAT_NEW:
ret = pdc_pat_mem_read_pd_pdt(&pat_pret,
pdt_entry,
num * sizeof(unsignedlong),
old_num_entries * sizeof(unsignedlong));
bad_mem_ptr = &pdt_entry[0]; break; #endif default:
ret = pdc_mem_pdt_read_entries(&pdt_read_ret,
pdt_entry);
bad_mem_ptr = &pdt_entry[old_num_entries]; break;
}
/* report and mark memory broken */ while (num--) { unsignedlong pde = *bad_mem_ptr++;
report_mem_err(pde);
#ifdef CONFIG_MEMORY_FAILURE if ((pde & PDT_ADDR_PERM_ERR) ||
((pde & PDT_ADDR_SINGLE_ERR) == 0))
memory_failure(pde >> PAGE_SHIFT, 0); else
soft_offline_page(pde >> PAGE_SHIFT, 0); #else
pr_crit("PDT: memory error at 0x%lx ignored.\n" "Rebuild kernel with CONFIG_MEMORY_FAILURE=y " "for real handling.\n",
pde & PDT_ADDR_PHYS_MASK); #endif
/*
 * NOTE(review): the following text is not source code.  It is German
 * website-disclaimer boilerplate picked up when this file was extracted
 * from a syntax-highlighting web page, fenced off here as a comment so the
 * file is not left with bare prose in it.  Translation: "The information
 * on this website was compiled carefully to the best of our knowledge.
 * However, neither completeness, nor correctness, nor quality of the
 * provided information is guaranteed.  Note: the colored syntax display
 * and the measurement are still experimental."
 */