// SPDX-License-Identifier: MIT
/*
 * Copyright 2024 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "mes_userqueue.h"
#include "amdgpu_userq_fence.h"
wptr_obj->obj = wptr_mapping->bo_va->base.bo; if (wptr_obj->obj->tbo.base.size > PAGE_SIZE) {
DRM_ERROR("Requested GART mapping for wptr bo larger than one page\n"); return -EINVAL;
}
ret = mes_userq_map_gtt_bo_to_gart(wptr_obj->obj); if (ret) {
DRM_ERROR("Failed to map wptr bo to GART\n"); return ret;
}
/* set process quantum to 10 ms and gang quantum to 1 ms as default */
queue_input.process_quantum = 100000;
queue_input.gang_quantum = 10000;
queue_input.paging = false;
amdgpu_mes_lock(&adev->mes);
r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
amdgpu_mes_unlock(&adev->mes); if (r)
DRM_ERROR("Failed to unmap queue in HW, err (%d)\n", r); return r;
}
	/*
	 * The FW expects at least one page space allocated for
	 * process ctx and gang ctx each. Create an object
	 * for the same.
	 */
size = AMDGPU_USERQ_PROC_CTX_SZ + AMDGPU_USERQ_GANG_CTX_SZ;
r = amdgpu_userq_create_object(uq_mgr, ctx, size); if (r) {
DRM_ERROR("Failed to allocate ctx space bo for userqueue, err:%d\n", r); return r;
}
/* Structure to initialize MQD for userqueue using generic MQD init function */
userq_props = kzalloc(sizeof(struct amdgpu_mqd_prop), GFP_KERNEL); if (!userq_props) {
DRM_ERROR("Failed to allocate memory for userq_props\n"); return -ENOMEM;
}
r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size); if (r) {
DRM_ERROR("Failed to create MQD object for userqueue\n"); goto free_props;
}
/* Initialize the MQD BO with user given values */
userq_props->wptr_gpu_addr = mqd_user->wptr_va;
userq_props->rptr_gpu_addr = mqd_user->rptr_va;
userq_props->queue_size = mqd_user->queue_size;
userq_props->hqd_base_gpu_addr = mqd_user->queue_va;
userq_props->mqd_gpu_addr = queue->mqd.gpu_addr;
userq_props->use_doorbell = true;
userq_props->doorbell_index = queue->doorbell_index;
userq_props->fence_address = queue->fence_drv->gpu_addr;
if (adev->gfx.funcs->get_gfx_shadow_info)
adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true); if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) { struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
if (mqd_user->mqd_size != sizeof(*compute_mqd)) {
DRM_ERROR("Invalid compute IP MQD size\n");
r = -EINVAL; goto free_mqd;
}
compute_mqd = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size); if (IS_ERR(compute_mqd)) {
DRM_ERROR("Failed to read user MQD\n");
r = -ENOMEM; goto free_mqd;
}
if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE))) goto free_mqd;
if (mqd_user->mqd_size != sizeof(*mqd_gfx_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid GFX MQD\n");
r = -EINVAL; goto free_mqd;
}
mqd_gfx_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size); if (IS_ERR(mqd_gfx_v11)) {
DRM_ERROR("Failed to read user MQD\n");
r = -ENOMEM; goto free_mqd;
}
if (mqd_user->mqd_size != sizeof(*mqd_sdma_v11) || !mqd_user->mqd) {
DRM_ERROR("Invalid SDMA MQD\n");
r = -EINVAL; goto free_mqd;
}
mqd_sdma_v11 = memdup_user(u64_to_user_ptr(mqd_user->mqd), mqd_user->mqd_size); if (IS_ERR(mqd_sdma_v11)) {
DRM_ERROR("Failed to read sdma user MQD\n");
r = -ENOMEM; goto free_mqd;
}
if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
shadow_info.csa_size)) goto free_mqd;
r = mqd_hw_default->init_mqd(adev, (void *)queue->mqd.cpu_ptr, userq_props); if (r) {
DRM_ERROR("Failed to initialize MQD for userqueue\n"); goto free_mqd;
}
/* Create BO for FW operations */
r = mes_userq_create_ctx_space(uq_mgr, queue, mqd_user); if (r) {
DRM_ERROR("Failed to allocate BO for userqueue (%d)", r); goto free_mqd;
}
/* FW expects WPTR BOs to be mapped into GART */
r = mes_userq_create_wptr_mapping(uq_mgr, queue, userq_props->wptr_gpu_addr); if (r) {
DRM_ERROR("Failed to create WPTR mapping\n"); goto free_ctx;
}
/*
 * NOTE(review): The following text is extraneous website boilerplate (a
 * German content disclaimer) that was accidentally appended to this source
 * file during extraction. It is preserved here, translated, as a comment
 * pending removal — it is not part of the driver code:
 *
 * "The information on this website has been carefully compiled to the best
 *  of our knowledge. However, neither completeness, nor correctness, nor
 *  quality of the provided information is guaranteed.
 *  Remark: the colored syntax highlighting and the measurement are still
 *  experimental."
 */