/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. *
*/ #include"amdgpu.h" #include"amdgpu_xcp.h" #include"amdgpu_drv.h"
/**
 * __amdgpu_xcp_run - invoke one IP block's callback for a transition state.
 * @xcp_mgr: partition manager owning the device
 * @xcp_ip: per-partition IP block descriptor (may be NULL or invalid)
 * @xcp_state: one of AMDGPU_XCP_PREPARE_SUSPEND, AMDGPU_XCP_SUSPEND,
 *             AMDGPU_XCP_PREPARE_RESUME, AMDGPU_XCP_RESUME
 *
 * Returns 0 when the IP entry is absent/invalid or provides no callback for
 * @xcp_state; otherwise returns the callback's result.
 */
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}
/**
 * amdgpu_xcp_run_transition - drive all IP blocks of one partition through a state.
 * @xcp_mgr: partition manager
 * @xcp_id: partition index (validated against MAX_XCP and the valid flag)
 * @state: transition state forwarded to __amdgpu_xcp_run()
 *
 * Walks every IP block of the partition and stops at the first failure.
 * Returns -EINVAL for an out-of-range or invalid partition, otherwise the
 * first non-zero callback result (0 on full success).
 */
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr,
				     int xcp_id, int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret = 0;	/* initialized so an empty block list reports success */

	/* also reject negative indices; xcp_id is a plain int */
	if (xcp_id < 0 || xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}
/* Run the PREPARE_SUSPEND transition on every IP block of partition @xcp_id. */
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}
/* Run the SUSPEND transition on every IP block of partition @xcp_id. */
int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}
/* Run the PREPARE_RESUME transition on every IP block of partition @xcp_id. */
int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}
/* Run the RESUME transition on every IP block of partition @xcp_id. */
int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}
/**
 * amdgpu_xcp_init - populate partition state for @num_xcps partitions.
 * @xcp_mgr: partition manager to initialize
 * @num_xcps: number of partitions (must be 1..MAX_XCP)
 * @mode: partition mode to record in the manager
 *
 * Invalidates all partition slots, computes how many partitions share each
 * memory partition, then queries per-IP details and (optionally) the memory
 * id for every partition.
 *
 * Returns -EINVAL for an out-of-range @num_xcps, 0 otherwise.
 *
 * NOTE(review): the original body was truncated after the partition loop;
 * the trailing "return 0;" restores a well-formed function. Upstream also
 * marks each initialized partition valid, records num_xcps and refreshes
 * the scheduler lists here — confirm against the upstream file.
 */
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* This is needed for figuring out memory id of xcp */
	xcp_mgr->num_xcp_per_mem_partition =
		num_xcps / xcp_mgr->adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			/* an IP block a partition doesn't have is not an error */
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				continue;
			else
				xcp_mgr->xcp[i].mem_id = mem_id;
		}
	}

	return 0;
}
/*
 * NOTE(review): this definition looks garbled/spliced. The first half is a
 * partition-mode switch sequence (transient-mode bookkeeping around the
 * funcs->switch_partition_mode callback), but from the for-loop onward the
 * body appears to belong to a different function (a per-partition drm
 * device allocation loop): the variables i, p_ddev and adev are used
 * without being declared here, "elseif" is not valid C, and the
 * mutex_lock() taken above is never released. Reconstruct this function
 * against the upstream source before relying on it.
 */
staticint __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{ int ret, curr_mode, num_xcps = 0;
/* Nothing to do when the backend has no mode-switch callback. */
if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode) return 0;
mutex_lock(&xcp_mgr->xcp_lock);
curr_mode = xcp_mgr->mode; /* State set to transient mode */
xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;
ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);
/* On failure, recover the actual mode either by query or by restoring. */
if (ret) { /* Failed, get whatever mode it's at now */ if (xcp_mgr->funcs->query_partition_mode)
xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
xcp_mgr, AMDGPU_XCP_FL_LOCKED); else
xcp_mgr->mode = curr_mode;
/* NOTE(review): code below seems to come from a drm-dev allocation loop. */
for (i = 1; i < MAX_XCP; i++) {
ret = amdgpu_xcp_drm_dev_alloc(&p_ddev); if (ret == -ENOSPC) {
dev_warn(adev->dev, "Skip xcp node #%d when out of drm node resource.", i);
ret = 0; goto out;
} elseif (ret) { goto out;
}
dev_set_drvdata(p_ddev->dev, &adev->xcp_mgr->xcp[i]);
}
ret = 0;
out:
amdgpu_xcp_sysfs_entries_init(adev->xcp_mgr);
return ret;
}
/*
 * NOTE(review): this definition is garbled/spliced. It starts as the
 * manager-init entry point (argument validation against xcp_funcs), but
 * from the "ring->xcp_id = ..." line onward the body belongs to a
 * ring-to-partition mapping helper: ring, inst_mask, inst_idx and ip_blk
 * are used without being declared here, and bare "return;" statements
 * appear in a function declared to return int. Reconstruct against the
 * upstream source before relying on it.
 */
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode, int init_num_xcps, struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{ struct amdgpu_xcp_mgr *xcp_mgr; int i;
/* A backend must at least provide per-IP detail queries. */
if (!xcp_funcs || !xcp_funcs->get_ip_details) return -EINVAL;
/* NOTE(review): code below seems to come from a ring-mapping helper. */
ring->xcp_id = AMDGPU_XCP_NO_PARTITION; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE)
adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id; if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) ||
(ring->funcs->type == AMDGPU_RING_TYPE_CPER)) return;
inst_mask = 1 << inst_idx;
/* Map the ring type onto the owning XCP IP block. */
switch (ring->funcs->type) { case AMDGPU_HW_IP_GFX: case AMDGPU_RING_TYPE_COMPUTE: case AMDGPU_RING_TYPE_KIQ:
ip_blk = AMDGPU_XCP_GFX; break; case AMDGPU_RING_TYPE_SDMA:
ip_blk = AMDGPU_XCP_SDMA; break; case AMDGPU_RING_TYPE_VCN_ENC: case AMDGPU_RING_TYPE_VCN_JPEG:
ip_blk = AMDGPU_XCP_VCN; break; default:
dev_err(adev->dev, "Not support ring type %d!", ring->funcs->type); return;
}
/**
 * amdgpu_xcp_sched_list_update - rebuild per-partition scheduler lists.
 * @adev: amdgpu device
 *
 * Clears every partition's ref count and gpu_sched table, then walks all
 * rings and additionally registers shared VCN rings (CPX-style configs
 * where there are more partitions than VCN instances) with the next
 * partition as well.
 *
 * Always returns 0.
 *
 * NOTE(review): upstream also registers each ready ring with its own
 * partition (amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id)) before
 * the shared-VCN case; that unconditional update appears to be missing
 * here — confirm against the upstream file before relying on this.
 */
static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0);
		memset(adev->xcp_mgr->xcp[i].gpu_sched, 0,
		       sizeof(adev->xcp_mgr->xcp->gpu_sched));
	}

	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		ring = adev->rings[i];
		if (!ring || !ring->sched.ready || ring->no_scheduler)
			continue;

		/*
		 * VCN may be shared by two partitions under CPX MODE in
		 * certain configs.
		 */
		if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC ||
		     ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) &&
		    (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst))
			amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1);
	}

	return 0;
}
/*
 * NOTE(review): this definition is truncated — the for-loop body is cut off
 * and the next line in the file starts an unrelated function, so the
 * function never closes. Only the prologue (iterate over all rings of the
 * device) is visible. Reconstruct against the upstream source.
 */
int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev)
{ int i;
for (i = 0; i < adev->num_rings; i++) { struct amdgpu_ring *ring = adev->rings[i];
/* Tear down KFD state (when requested via @flags) before a partition switch. */
int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	/*
	 * TODO: Stop user queues and threads, and make sure GPU is empty of
	 * work.
	 */

	if (flags & AMDGPU_XCP_OPS_KFD)
		amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev);

	return 0;
}
/**
 * amdgpu_xcp_post_partition_switch - bring KFD back up after a partition switch.
 * @xcp_mgr: partition manager
 * @flags: AMDGPU_XCP_OPS_KFD requests KFD re-probe/re-init
 *
 * Returns -EIO when KFD was requested but failed to complete init,
 * 0 otherwise.
 *
 * NOTE(review): the original was truncated — it fell off the end of a
 * non-void function; the trailing "return ret;" restores it.
 */
int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int ret = 0;

	if (flags & AMDGPU_XCP_OPS_KFD) {
		amdgpu_amdkfd_device_probe(xcp_mgr->adev);
		amdgpu_amdkfd_device_init(xcp_mgr->adev);
		/* If KFD init failed, return failure */
		if (!xcp_mgr->adev->kfd.init_complete)
			ret = -EIO;
	}

	return ret;
}
/**
 * amdgpu_xcp_sysfs_entries_fini - remove sysfs entries for the first @n partitions.
 * @xcp_mgr: partition manager whose entries are torn down
 * @n: number of partitions whose entries were created (walked in reverse)
 *
 * Skips partitions with no drm device or not marked valid; for the rest,
 * removes the attribute group and drops the kobject reference.
 */
static void amdgpu_xcp_sysfs_entries_fini(struct amdgpu_xcp_mgr *xcp_mgr, int n)
{
	struct amdgpu_xcp *xcp;

	for (n--; n >= 0; n--) {
		xcp = &xcp_mgr->xcp[n];
		if (!xcp->ddev || !xcp->valid)
			continue;

		sysfs_remove_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		kobject_put(&xcp->kobj);
	}
}
/**
 * amdgpu_xcp_sysfs_entries_init - create a sysfs "xcp" node per partition.
 * @xcp_mgr: partition manager whose partitions get sysfs entries
 *
 * Walks partitions until the first one without a drm device, registering a
 * kobject named "xcp" plus the attribute group under each partition's drm
 * device kobject. On failure the just-initialized kobject is released and
 * the walk stops; entries created earlier are left in place (torn down by
 * amdgpu_xcp_sysfs_entries_fini()).
 */
static void amdgpu_xcp_sysfs_entries_init(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i, r;

	for (i = 0; i < MAX_XCP; i++) {
		xcp = &xcp_mgr->xcp[i];
		/* partitions are populated contiguously; stop at the first gap */
		if (!xcp->ddev)
			break;

		r = kobject_init_and_add(&xcp->kobj, &xcp_sysfs_ktype,
					 &xcp->ddev->dev->kobj, "xcp");
		if (r)
			goto out;

		r = sysfs_create_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
		if (r)
			goto out;
	}

	return;
out:
	/* release the kobject that failed to register fully */
	kobject_put(&xcp->kobj);
}
/**
 * amdgpu_xcp_sysfs_entries_update - refresh each partition's attribute group.
 * @xcp_mgr: partition manager whose sysfs groups are updated
 *
 * Skips partitions without a drm device; for the rest re-evaluates group
 * visibility via sysfs_update_group().
 */
static void amdgpu_xcp_sysfs_entries_update(struct amdgpu_xcp_mgr *xcp_mgr)
{
	struct amdgpu_xcp *xcp;
	int i;

	for (i = 0; i < MAX_XCP; i++) {
		xcp = &xcp_mgr->xcp[i];
		if (!xcp->ddev)
			continue;

		sysfs_update_group(&xcp->kobj, &amdgpu_xcp_attrs_group);
	}
}
/*
 * NOTE(review): this definition is truncated — only the "no partition
 * manager" early-return guard is visible and the function body never
 * closes before the file trails off into non-code text. Reconstruct the
 * remainder against the upstream source.
 */
void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev)
{ if (!adev->xcp_mgr) return;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.