/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * *
*/ #include <linux/debugfs.h> #include <linux/list.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/reboot.h> #include <linux/syscalls.h> #include <linux/pm_runtime.h> #include <linux/list_sort.h>
/*
 * NOTE(review): extraction-garbled fragment. The dev_warn lines below look
 * like the tail of an EEPROM-reset warning helper, and the query/copy_to_user
 * code looks like the interior of a debugfs/sysfs error-count read handler;
 * the enclosing function signatures were lost. Code kept byte-identical.
 */
dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
dev_warn(adev->dev, "Clear EEPROM:\n");
dev_warn(adev->dev, " echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");
if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL;
/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) { if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
}
/* Format "ue"/"ce" counts into val[], then copy the slice at *pos to user. */
s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", "ue", info.ue_count, "ce", info.ce_count); if (*pos >= s) return 0;
s -= *pos;
s = min_t(u64, s, size);
if (copy_to_user(buf, &val[*pos], s)) return -EINVAL;
staticint amdgpu_ras_find_block_id_by_name(constchar *name, int *block_id)
{ int i;
for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
*block_id = i; if (strcmp(name, ras_block_string[i]) == 0) return 0;
} return -EINVAL;
}
/*
 * NOTE(review): this span is corrupted by extraction.  It begins with the
 * prologue of amdgpu_ras_debugfs_ctrl_parse_data() (local declarations for
 * parsing "op block error sub-block address value mask" input), but the body
 * that follows — instance-mask clamping keyed off num_xcc / per-block
 * GENMASK() — appears to belong to a separate instance-mask-check helper
 * whose header was lost.  Code kept byte-identical; do not rewrite without
 * the original file.
 */
staticint amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, constchar __user *buf, size_t size,
loff_t *pos, struct ras_debug_if *data)
{
ssize_t s = min_t(u64, 64, size); char str[65]; char block_name[33]; char err[9] = "ue"; int op = -1; int block_id;
uint32_t sub_block;
u64 address, value; /* default value is 0 if the mask is not set by user */
u32 instance_mask = 0;
/* no need to set instance mask if there is only one instance */ if (num_xcc <= 1 && inst_mask) {
data->inject.instance_mask = 0;
dev_dbg(adev->dev, "RAS inject mask(0x%x) isn't supported and force it to 0.\n",
inst_mask);
return;
}
/* Build the set of valid instance bits for the targeted IP block. */
switch (data->head.block) { case AMDGPU_RAS_BLOCK__GFX:
mask = GENMASK(num_xcc - 1, 0); break; case AMDGPU_RAS_BLOCK__SDMA:
mask = GENMASK(adev->sdma.num_instances - 1, 0); break; case AMDGPU_RAS_BLOCK__VCN: case AMDGPU_RAS_BLOCK__JPEG:
mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0); break; default:
mask = inst_mask; break;
}
/* remove invalid bits in instance mask */
data->inject.instance_mask &= mask; if (inst_mask != data->inject.instance_mask)
dev_dbg(adev->dev, "Adjust RAS inject mask 0x%x to 0x%x\n",
inst_mask, data->inject.instance_mask);
}
/** * DOC: AMDGPU RAS debugfs control interface * * The control interface accepts struct ras_debug_if which has two members. * * First member: ras_debug_if::head or ras_debug_if::inject. * * head is used to indicate which IP block will be under control. * * head has four members, they are block, type, sub_block_index, name. * block: which IP will be under control. * type: what kind of error will be enabled/disabled/injected. * sub_block_index: some IPs have subcomponents. say, GFX, sDMA. * name: the name of IP. * * inject has three more members than head, they are address, value and mask. * As their names indicate, inject operation will write the * value to the address. * * The second member: struct ras_debug_if::op. * It has three kinds of operations. * * - 0: disable RAS on the block. Take ::head as its data. * - 1: enable RAS on the block. Take ::head as its data. * - 2: inject errors on the block. Take ::inject as its data. * * How to use the interface? * * In a program * * Copy the struct ras_debug_if in your code and initialize it. * Write the struct to the control interface. * * From shell * * .. code-block:: bash * * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl * echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl * * Where N is the card which you want to affect. * * "disable" requires only the block. * "enable" requires the block and error type. * "inject" requires the block, error type, address, and value. * * The block is one of: umc, sdma, gfx, etc. * see ras_block_string[] for details * * The error type is one of: ue, ce and poison where, * ue is multi-uncorrectable * ce is single-correctable * poison is poison * * The sub-block is the sub-block index, pass 0 if there is no sub-block. * The address and value are hexadecimal numbers, leading 0x is optional. 
* The mask means instance mask, is optional, default value is 0x1. * * For instance, * * .. code-block:: bash * * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl * * How to check the result of the operation? * * To check disable/enable, see "ras" features at, * /sys/class/drm/card[0/1/2...]/device/ras/features * * To check inject, see the corresponding error count at, * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count * * .. note:: * Operations are only allowed on blocks which are supported. * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask * to see which blocks support RAS on a particular asic. *
*/
/*
 * amdgpu_ras_debugfs_ctrl_write - write handler for .../ras/ras_ctrl.
 * @f:    debugfs file; its inode private data is the amdgpu device.
 * @buf:  user buffer holding the textual command (see DOC above).
 * @size: number of bytes written by user space.
 * @pos:  file position (unused beyond parsing).
 *
 * Parses the command, then dispatches on op: 0 = disable RAS on the block,
 * 1 = enable, 2 = inject an error, 3 = retire a page directly.  Returns
 * @size on success or a negative errno.
 */
static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f,
					     const char __user *buf,
					     size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
	struct ras_debug_if data;
	int ret = 0;

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	/* op 3: reserve the given page directly, no RAS support check needed */
	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		/* umc ce/ue error injection for a bad page is not allowed */
		if (data.head.block == AMDGPU_RAS_BLOCK__UMC)
			ret = amdgpu_ras_check_bad_page(adev, data.inject.address);
		if (ret == -EINVAL) {
			dev_warn(adev->dev, "RAS WARN: input address 0x%llx is invalid.",
					data.inject.address);
			break;
		} else if (ret == 1) {
			dev_warn(adev->dev, "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
					data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		return ret;

	return size;
}
/** * DOC: AMDGPU RAS debugfs EEPROM table reset interface * * Some boards contain an EEPROM which is used to persistently store a list of * bad pages which experiences ECC errors in vram. This interface provides * a way to reset the EEPROM, e.g., after testing error injection. * * Usage: * * .. code-block:: bash * * echo 1 > ../ras/ras_eeprom_reset * * will reset EEPROM table to 0 entries. *
*/
/*
 * amdgpu_ras_debugfs_eeprom_write - write handler for .../ras/ras_eeprom_reset.
 * @f:    debugfs file; its inode private data is the amdgpu device.
 * @buf:  user buffer (content ignored; any write triggers the reset).
 * @size: number of bytes written by user space.
 * @pos:  file position (unused).
 *
 * Resets the RAS bad-page EEPROM table to zero entries and restores the
 * default RAS context flags on success.  Returns @size on success or the
 * negative errno from the reset.
 */
static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f,
					       const char __user *buf,
					       size_t size, loff_t *pos)
{
	struct amdgpu_device *adev =
		(struct amdgpu_device *)file_inode(f)->i_private;
	int ret;

	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));

	if (!ret) {
		/* Something was written to EEPROM.
		 */
		amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS;
		return size;
	} else {
		return ret;
	}
}
/** * DOC: AMDGPU RAS sysfs Error Count Interface * * It allows the user to read the error count for each IP block on the gpu through * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count * * It outputs the multiple lines which report the uncorrected (ue) and corrected * (ce) error counts. * * The format of one line is below, * * [ce|ue]: count * * Example: * * .. code-block:: bash * * ue: 0 * ce: 1 *
/*
 * NOTE(review): truncated by extraction — the function queries and resets
 * the per-block error counters, but the final sysfs_emit of the ue/ce
 * counts (and the closing brace) are missing from this view.  Code kept
 * byte-identical.
 */
*/ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, struct device_attribute *attr, char *buf)
{ struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr); struct ras_query_if info = {
.head = obj->head,
};
if (!amdgpu_ras_get_error_query_ready(obj->adev)) return sysfs_emit(buf, "Query currently inaccessible\n");
if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL;
/* MP0 11.0.2/11.0.4 clear counters on query; skip the explicit reset there. */
if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) { if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
}
/*
 * NOTE(review): truncated by extraction — only the argument validation of
 * amdgpu_ras_find_obj() is visible; the actual object lookup and the
 * closing braces are missing from this view.  Code kept byte-identical.
 */
/* return an obj equal to head, or the first when head is NULL */ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, struct ras_common_if *head)
{ struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; int i;
if (!adev->ras_enabled || !con) return NULL;
if (head) { if (head->block >= AMDGPU_RAS_BLOCK_COUNT) return NULL;
if (head->block == AMDGPU_RAS_BLOCK__MCA) { if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) return NULL;
/*
 * __amdgpu_ras_feature_enable - set the software-side feature state.
 * @adev:   amdgpu device.
 * @head:   identifies the IP block whose RAS feature state changes.
 * @enable: non-zero to enable (creating/referencing the ras_manager obj
 *          and setting the feature bit), zero to disable (clearing the
 *          bit and dropping the obj reference).
 *
 * If the obj is not created yet, create one and set the feature enable
 * flag.  Returns 0 on success or when the feature is not allowed, -EINVAL
 * if the obj could not be created.
 */
static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
				       struct ras_common_if *head,
				       int enable)
{
	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, head);

	/* If hardware does not support ras, then do not create obj.
	 * But if hardware support ras, we can create the obj.
	 * Ras framework checks con->hw_supported to see if it need do
	 * corresponding initialization.
	 * IP checks con->support to see if it need disable ras.
	 */
	if (!amdgpu_ras_is_feature_allowed(adev, head))
		return 0;

	if (enable) {
		if (!obj) {
			obj = amdgpu_ras_create_obj(adev, head);
			if (!obj)
				return -EINVAL;
		} else {
			/* In case we create obj somewhere else */
			get_obj(obj);
		}
		con->features |= BIT(head->block);
	} else {
		if (obj && amdgpu_ras_is_feature_enabled(adev, head)) {
			con->features &= ~BIT(head->block);
			put_obj(obj);
		}
	}

	return 0;
}
/*
 * NOTE(review): truncated by extraction.  After the kzalloc of the TA
 * command buffer, the code that fills it and issues the PSP command is
 * missing; as shown, the allocated 'info' would leak and 'ret' would be
 * unused — presumably lost lines, not the real control flow.  Code kept
 * byte-identical; verify against the original file before changing.
 */
/* wrapper of psp_ras_enable_features */ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, struct ras_common_if *head, bool enable)
{ struct amdgpu_ras *con = amdgpu_ras_get_context(adev); union ta_ras_cmd_input *info; int ret;
if (!con) return -EINVAL;
/* For non-gfx ip, do not enable ras feature if it is not allowed */ /* For gfx ip, regardless of feature support status, */ /* Force issue enable or disable ras feature commands */ if (head->block != AMDGPU_RAS_BLOCK__GFX &&
!amdgpu_ras_is_feature_allowed(adev, head)) return 0;
/* Only enable gfx ras feature from host side */ if (head->block == AMDGPU_RAS_BLOCK__GFX &&
!amdgpu_sriov_vf(adev) &&
!amdgpu_ras_intr_triggered()) {
info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL); if (!info) return -ENOMEM;
/* setup the obj */
__amdgpu_ras_feature_enable(adev, head, enable);
return 0;
}
/*
 * NOTE(review): truncated by extraction — the function's final return and
 * closing brace are missing from this view.  Code kept byte-identical.
 * When RAS was initialized by the VBIOS, enabling goes through the RAS TA
 * (with a fallback that only sets up the software object on old TAs);
 * disabling sets up the object first, then issues the TA disable command.
 */
/* Only used in device probe stage and called only once. */ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, struct ras_common_if *head, bool enable)
{ struct amdgpu_ras *con = amdgpu_ras_get_context(adev); int ret;
if (!con) return -EINVAL;
if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { if (enable) { /* There is no harm to issue a ras TA cmd regardless of * the current ras state. * If current state == target state, it will do nothing * But sometimes it requests driver to reset and repost * with error code -EAGAIN.
*/
ret = amdgpu_ras_feature_enable(adev, head, 1); /* With old ras TA, we might fail to enable ras. * Log it and just setup the object. * TODO need remove this WA in the future.
*/ if (ret == -EINVAL) {
ret = __amdgpu_ras_feature_enable(adev, head, 1); if (!ret)
dev_info(adev->dev, "RAS INFO: %s setup object\n",
get_ras_block_str(head));
}
} else { /* setup the object then issue a ras TA disable cmd.*/
ret = __amdgpu_ras_feature_enable(adev, head, 1); if (ret) return ret;
/* gfx block ras disable cmd must send to ras-ta */ if (head->block == AMDGPU_RAS_BLOCK__GFX)
con->features |= BIT(head->block);
ret = amdgpu_ras_feature_enable(adev, head, 0);
/* clean gfx block ras features flag */ if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
con->features &= ~BIT(head->block);
}
} else
ret = amdgpu_ras_feature_enable(adev, head, enable);
/*
 * NOTE(review): fragment — the enclosing function header was lost in
 * extraction.  Two loops enable RAS for every regular block (skipping MCA)
 * and then for every MCA sub-block; 'bypass' selects software-only object
 * creation (VBIOS already enabled RAS) versus going through the TA.
 * Code kept byte-identical.
 */
for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { struct ras_common_if head = {
.block = i,
.type = default_ras_type,
.sub_block_index = 0,
};
if (i == AMDGPU_RAS_BLOCK__MCA) continue;
if (bypass) { /* * bypass psp. vbios enable ras for us. * so just create the obj
*/ if (__amdgpu_ras_feature_enable(adev, &head, 1)) break;
} else { if (amdgpu_ras_feature_enable(adev, &head, 1)) break;
}
}
/* Same pass for the MCA sub-blocks. */
for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { struct ras_common_if head = {
.block = AMDGPU_RAS_BLOCK__MCA,
.type = default_ras_type,
.sub_block_index = i,
};
if (bypass) { /* * bypass psp. vbios enable ras for us. * so just create the obj
*/ if (__amdgpu_ras_feature_enable(adev, &head, 1)) break;
} else { if (amdgpu_ras_feature_enable(adev, &head, 1)) break;
}
}
/*
 * NOTE(review): fragment — the enclosing function header was lost in
 * extraction.  Walks adev->ras_list looking for a ras block object that
 * matches (block, sub_block_index), preferring the object's own
 * ras_block_match callback over the default match.  Code kept
 * byte-identical.
 */
list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { if (!node->ras_obj) {
dev_warn(adev->dev, "Warning: abnormal ras list node.\n"); continue;
}
obj = node->ras_obj; if (obj->ras_block_match) { if (obj->ras_block_match(obj, block, sub_block_index) == 0) return obj;
} else { if (amdgpu_ras_block_match_default(obj, block) == 0) return obj;
}
}
return NULL;
}
/*
 * amdgpu_ras_get_ecc_info - collect UMC ECC error info into @err_data.
 * @adev:     amdgpu device.
 * @err_data: output error data filled by the chosen query path.
 *
 * Chooses the right query method according to whether the SMU supports
 * querying error information: if amdgpu_dpm_get_ecc_info() returns
 * -EOPNOTSUPP, fall back to the UMC hw_ops query callbacks; if it
 * succeeds, use the ecc_info_* callbacks that read the SMU-provided data.
 */
static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/*
	 * choosing right query method according to
	 * whether smu support query error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
	if (ret == -EOPNOTSUPP) {
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_count)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);

		/* umc query_ras_error_address is also responsible for clearing
		 * error status
		 */
		if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops &&
		    adev->umc.ras->ras_block.hw_ops->query_ras_error_address)
			adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data);
	} else if (!ret) {
		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_count)
			adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data);

		if (adev->umc.ras &&
		    adev->umc.ras->ecc_info_query_ras_error_address)
			adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data);
	}
}
/*
 * NOTE(review): fragment — the tail of an error-inject routine; its header
 * and the setup of 'block_info' were lost in extraction.  Dispatches to the
 * block's own ras_error_inject hook when present (GFX passes the caller's
 * info, other blocks the translated block_info), otherwise falls back to
 * psp_ras_trigger_error().  Code kept byte-identical.
 */
if (block_obj->hw_ops->ras_error_inject) { if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask); else/* Special ras_error_inject is defined (e.g: xgmi) */
ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
info->instance_mask);
} else { /* default path */
ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
}
if (ret)
dev_err(adev->dev, "ras inject %s failed %d\n",
get_ras_block_str(&info->head), ret);
return ret;
}
/** * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP * @adev: pointer to AMD GPU device * @ce_count: pointer to an integer to be set to the count of correctible errors. * @ue_count: pointer to an integer to be set to the count of uncorrectible errors. * @query_info: pointer to ras_query_if * * Return 0 for query success or do nothing, otherwise return an error * on failures
*/
static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev,
					       unsigned long *ce_count,
					       unsigned long *ue_count,
					       struct ras_query_if *query_info)
{
	int ret;

	if (!query_info)
		/* do nothing if query_info is not specified */
		return 0;

	ret = amdgpu_ras_query_error_status(adev, query_info);
	if (ret)
		return ret;

	/*
	 * NOTE(review): the kernel-doc above says @ce_count/@ue_count are set,
	 * but no accumulation from query_info is visible in this body — the
	 * lines may have been lost in extraction; confirm against the
	 * original file.
	 */

	/* some hardware/IP supports read to clear
	 * no need to explicitly reset the err status after the query call
	 */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev, "Failed to reset error counter and error status\n");
	}

	return 0;
}
/** * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP * @adev: pointer to AMD GPU device * @ce_count: pointer to an integer to be set to the count of correctible errors. * @ue_count: pointer to an integer to be set to the count of uncorrectible * errors. * @query_info: pointer to ras_query_if if the query request is only for * specific ip block; if info is NULL, then the qurey request is for * all the ip blocks that support query ras error counters/status * * If set, @ce_count or @ue_count, count and return the corresponding * error counts in those integer pointers. Return 0 if the device * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS.
*/ int amdgpu_ras_query_error_count(struct amdgpu_device *adev, unsignedlong *ce_count, unsignedlong *ue_count, struct ras_query_if *query_info)
{ struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; unsignedlong ce, ue; int ret;
if (!adev->ras_enabled || !con) return -EOPNOTSUPP;
/* Don't count since no reporting.
*/ if (!ce_count && !ue_count) return 0;
ce = 0;
ue = 0; if (!query_info) { /* query all the ip blocks that support ras query interface */
list_for_each_entry(obj, &con->head, node) { struct ras_query_if info = {
.head = obj->head,
};
ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
}
} else { /* query specific ip block */
ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
}
staticchar *amdgpu_ras_badpage_flags_str(unsignedint flags)
{ switch (flags) { case AMDGPU_RAS_RETIRE_PAGE_RESERVED: return"R"; case AMDGPU_RAS_RETIRE_PAGE_PENDING: return"P"; case AMDGPU_RAS_RETIRE_PAGE_FAULT: default: return"F";
}
}
/** * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface * * It allows user to read the bad pages of vram on the gpu through * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages * * It outputs multiple lines, and each line stands for one gpu page. * * The format of one line is below, * gpu pfn : gpu page size : flags * * gpu pfn and gpu page size are printed in hex format. * flags can be one of below character, * * R: reserved, this gpu page is reserved and not able to use. * * P: pending for reserve, this gpu page is marked as bad, will be reserved * in next window of page_reserve. * * F: unable to reserve. this gpu page can't be reserved due to some reasons. * * Examples: * * .. code-block:: bash * * 0x00000001 : 0x00001000 : R * 0x00000002 : 0x00001000 : P *
*/
/*
 * NOTE(review): fragment — tail of a sysfs removal routine whose header was
 * lost in extraction.  Removes the bad-page node (only when bad-page
 * retirement is active) and the device attribute node.  Code kept
 * byte-identical.
 */
if (amdgpu_bad_page_threshold != 0)
amdgpu_ras_sysfs_remove_bad_page_node(adev);
amdgpu_ras_sysfs_remove_dev_attr_node(adev);
return 0;
} /* sysfs end */
/** * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors * * Normally when there is an uncorrectable error, the driver will reset * the GPU to recover. However, in the event of an unrecoverable error, * the driver provides an interface to reboot the system automatically * in that event. * * The following file in debugfs provides that interface: * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot * * Usage: * * .. code-block:: bash * * echo true > .../ras/auto_reboot *
/*
 * NOTE(review): truncated by extraction — 'dir' is used below without a
 * visible debugfs_create_dir() (and the ras_ctrl/ras_eeprom_reset file
 * creation is absent); those lines were presumably lost.  'eeprom' and
 * 'minor' are likewise declared but unused here.  Code kept byte-identical;
 * confirm against the original file.
 */
*/ /* debugfs begin */ staticstruct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev)
{ struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *dir;
/* * After one uncorrectable error happens, usually GPU recovery will * be scheduled. But due to the known problem in GPU recovery failing * to bring GPU back, below interface provides one direct way to * user to reboot system automatically in such case within * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine * will never be called.
*/
debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
/* * User could set this not to clean up hardware's error count register * of RAS IPs during ras recovery.
*/
debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
&con->disable_ras_err_cnt_harvest); return dir;
}
/*
 * NOTE(review): fragment — tail of the RAS fs teardown routine whose header
 * was lost in extraction.  Drops the debugfs-held references on every
 * registered ras object, then removes all sysfs nodes.  Code kept
 * byte-identical.
 */
if (IS_ENABLED(CONFIG_DEBUG_FS)) {
list_for_each_entry_safe(con_obj, tmp, &con->head, node) {
ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head); if (ip_obj)
put_obj(ip_obj);
}
}
amdgpu_ras_sysfs_remove_all(adev); return 0;
} /* ras fs end */
/* ih begin */
/* For the hardware that cannot enable bif ring for both ras_controller_irq * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status * register to check whether the interrupt is triggered or not, and properly * ack the interrupt if it is there
*/
void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev)
{
	/* Fatal error events are handled on host side */
	if (amdgpu_sriov_vf(adev))
		return;

	/*
	 * If the current interrupt is caused by a non-fatal RAS error, skip
	 * check for fatal error. For fatal errors, FED status of all devices
	 * in XGMI hive gets set when the first device gets fatal error
	 * interrupt. The error gets propagated to other devices as well, so
	 * make sure to ack the interrupt regardless of FED status.
	 */
	if (!amdgpu_ras_get_fed_status(adev) &&
	    amdgpu_ras_is_err_state(adev, AMDGPU_RAS_BLOCK__ANY))
		return;

	if (adev->nbio.ras) {
		/* Poll/ack the RAS controller interrupt (no BIF ring). */
		if (adev->nbio.ras->handle_ras_controller_intr_no_bifring)
			adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev);

		/* Poll/ack the err-event-athub interrupt (no BIF ring). */
		if (adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring)
			adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev);
	}
}
/*
 * NOTE(review): fused fragments of two interrupt handlers — a poison
 * consumption path (mark event, set poison state, optionally query poison
 * status) and an error-data dispatch path (init err_data, set FED, run the
 * IP callback, accumulate ue/ce/de counts).  The function headers and the
 * joins between them were lost in extraction.  Code kept byte-identical.
 */
ret = amdgpu_ras_mark_ras_event(adev, type); if (ret) return;
amdgpu_ras_set_err_poison(adev, block_obj->ras_comm.block); /* both query_poison_status and handle_poison_consumption are optional, * but at least one of them should be implemented if we need poison * consumption handler
*/ if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) {
poison_stat = block_obj->hw_ops->query_poison_status(adev); if (!poison_stat) { /* Not poison consumption interrupt, no need to handle it */
dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
block_obj->ras_comm.name);
ret = amdgpu_ras_error_data_init(&err_data); if (ret) return;
/* Let IP handle its data, maybe we need get the output * from the callback to update the error type/count, etc
*/
amdgpu_ras_set_fed(obj->adev, true);
ret = data->cb(obj->adev, &err_data, entry); /* ue will trigger an interrupt, and in that case * we need do a reset to recovery the whole system. * But leave IP do that recovery, here we just dispatch * the error.
*/ if (ret == AMDGPU_RAS_SUCCESS) { /* these counts could be left as 0 if * some blocks do not count error number
*/
obj->err_data.ue_count += err_data.ue_count;
obj->err_data.ce_count += err_data.ce_count;
obj->err_data.de_count += err_data.de_count;
}
/*
 * NOTE(review): the German text below is website-disclaimer residue from
 * the extraction tool, unrelated to this driver.  Preserved as a comment
 * so the file remains valid C; safe to drop when restoring the original.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */