/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * NOTE(review): the code below is a corrupted, interleaved extraction of at
 * least two separate functions and must not be compiled as-is:
 *  - `staticint` is missing a space (extraction damage);
 *  - the suspend loop body is cut off mid-way;
 *  - `tmp_adev`, `reset_device_list` and `r` are used with no visible
 *    declarations — they belong to a second function (presumably an XGMI
 *    reset-on-init routine) whose head and tail were lost.
 * Recover both functions from the upstream amdgpu_reset.c before editing.
 * Code is preserved byte-identical; only comments were added.
 */
staticint amdgpu_reset_xgmi_reset_on_init_suspend(struct amdgpu_device *adev)
{ int i;
/* Walk IP blocks in reverse init order, skipping invalid/un-initialized
 * hardware; DCE (display) blocks are skipped — handled in phase1. */
for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; if (!adev->ip_blocks[i].status.hw) continue; /* displays are handled in phase1 */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) continue;
/* VCN FW shared region is in frambuffer, there are some flags * initialized in that region during sw_init. Make sure the region is * backed up.
*/
amdgpu_vcn_save_vcpu_bo(adev);
/* NOTE(review): from here on the text belongs to a different function —
 * the enclosing list/lock context is not visible in this extraction. */
list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_lock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset =
amdgpu_asic_reset_method(adev);
}
r = 0; /* Mode1 reset needs to be triggered on all devices together */
list_for_each_entry(tmp_adev, reset_device_list, reset_list) { /* For XGMI run all resets in parallel to speed up the process */ if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
r = -EALREADY; if (r) {
dev_err(tmp_adev->dev, "xgmi reset on init: reset failed with error, %d",
r); break;
}
}
/* For XGMI wait for all resets to complete before proceed */ if (!r) {
list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
flush_work(&tmp_adev->xgmi_reset_work);
r = tmp_adev->asic_reset_res; if (r) break;
}
}
/**
 * amdgpu_reset_init - install the ASIC-specific reset handler set
 * @adev: amdgpu device pointer
 *
 * Dispatches on the MP1 (SMU) IP version and calls the matching
 * per-ASIC reset init routine.  An unrecognized version is not an
 * error: nothing is registered and 0 is returned.
 *
 * Returns 0 on success or the negative error code propagated from
 * the version-specific init routine.
 */
int amdgpu_reset_init(struct amdgpu_device *adev)
{
	int ret;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
	case IP_VERSION(13, 0, 12):
	case IP_VERSION(13, 0, 14):
		ret = aldebaran_reset_init(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_init(adev);
		break;
	case IP_VERSION(13, 0, 10):
		ret = smu_v13_0_10_reset_init(adev);
		break;
	default:
		ret = 0;
		break;
	}

	return ret;
}
/**
 * amdgpu_reset_fini - tear down the ASIC-specific reset handler set
 * @adev: amdgpu device pointer
 *
 * Mirror of amdgpu_reset_init(): dispatches on the MP1 (SMU) IP version
 * and calls the matching per-ASIC reset fini routine.  Unknown versions
 * fall through to the default case, leaving ret at 0.
 *
 * NOTE(review): this extraction is truncated — the `return ret;` and the
 * function's closing brace are missing here (the next visible line belongs
 * to a different function).  Recover the tail from the upstream file.
 */
int amdgpu_reset_fini(struct amdgpu_device *adev)
{ int ret = 0;
switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { case IP_VERSION(13, 0, 2): case IP_VERSION(13, 0, 6): case IP_VERSION(13, 0, 12): case IP_VERSION(13, 0, 14):
ret = aldebaran_reset_fini(adev); break; case IP_VERSION(11, 0, 7):
ret = sienna_cichlid_reset_fini(adev); break; case IP_VERSION(13, 0, 10):
ret = smu_v13_0_10_reset_fini(adev); break; default: break;
}
/*
 * NOTE(review): orphaned fragment — both the head and tail of its enclosing
 * function (presumably the reset-domain constructor, given the NULL return
 * and reset_domain put) lie outside this extraction.  Visible behavior:
 * allocate a single-threaded workqueue for the reset domain; on allocation
 * failure, log, drop the reference taken on reset_domain, and return NULL.
 * Code preserved byte-identical; only comments added.
 */
reset_domain->wq = create_singlethread_workqueue(wq_name); if (!reset_domain->wq) {
DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
amdgpu_reset_put_reset_domain(reset_domain); return NULL;
/*
 * NOTE(review): extraneous trailing text — a German website disclaimer that
 * is not part of this driver source (extraction artifact).  Preserved here,
 * translated to English and wrapped in a comment so the file stays valid C:
 * "The information on this website has been carefully compiled to the best
 * of our knowledge.  However, no guarantee is given as to the completeness,
 * correctness, or quality of the information provided.  Note: the colored
 * syntax rendering and the measurement are still experimental."
 */