/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100)
#define GFX_OFF_NO_DELAY 0
/*
 * GPU GFX IP block helper functions.
 */
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, int pipe, int queue)
{
int bit = 0;

bit += mec * adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
bit += pipe * adev->gfx.mec.num_queue_per_pipe;
bit += queue;
return bit;
}
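/*
 * Worked example of the mapping above (numbers assumed purely for
 * illustration): with num_pipe_per_mec = 4 and num_queue_per_pipe = 8,
 * (mec 1, pipe 2, queue 3) maps to bit 1 * (4 * 8) + 2 * 8 + 3 = 51,
 * and the inverse helper below recovers (1, 2, 3) from bit 51.
 */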
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue)
{
*queue = bit % adev->gfx.mec.num_queue_per_pipe;
*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
% adev->gfx.mec.num_pipe_per_mec;
*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
/ adev->gfx.mec.num_pipe_per_mec;
}
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id, int mec, int pipe, int queue)
{
return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
adev->gfx.mec_bitmap[xcc_id].queue_bitmap);
}
static int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
int me, int pipe, int queue)
{
int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
int bit = 0;

bit += me * adev->gfx.me.num_pipe_per_me
* num_queue_per_pipe;
bit += pipe * num_queue_per_pipe;
bit += queue;
return bit;
}
bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue)
{
return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
adev->gfx.me.queue_bitmap);
}
/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned int *mask, unsigned int max_se,
unsigned int max_sh)
{
unsigned int se, sh, cu;
const char *p;

memset(mask, 0, sizeof(*mask) * max_se * max_sh);
if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
return;

p = amdgpu_disable_cu;
for (;;) {
char *next;
int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

if (ret < 3) {
DRM_ERROR("amdgpu: could not parse disable_cu\n");
return;
}
if (se < max_se && sh < max_sh && cu < 16) {
DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
mask[se * max_sh + sh] |= 1u << cu;
} else {
DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
se, sh, cu);
}
next = strchr(p, ',');
if (!next)
break;
p = next + 1;
}
}
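/*
 * Illustrative example of the format parsed above (each triplet is
 * "se.sh.cu", comma separated): passing disable_cu=0.0.4,1.1.2 on the
 * amdgpu module command line would set bit 4 in mask[0 * max_sh + 0] and
 * bit 2 in mask[1 * max_sh + 1], assuming both triplets are in range.
 */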
static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
{
if (amdgpu_ip_version(adev, GC_HWIP, 0) > IP_VERSION(9, 0, 0))
return true;

/* FIXME: spreading the queues across pipes causes perf regressions
 * on POLARIS11 compute workloads */
if (adev->asic_type == CHIP_POLARIS11)
return false;

return adev->gfx.mec.num_mec > 1;
}
bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
int queue = ring->queue;
int pipe = ring->pipe;

/* Policy: use pipe1 queue0 as high priority graphics queue if we
 * have more than one gfx pipe.
 */
if (amdgpu_gfx_is_graphics_multipipe_capable(adev) &&
adev->gfx.num_gfx_rings > 1 && pipe == 1 && queue == 0) {
int me = ring->me;
int bit;

bit = amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue);
if (ring == &adev->gfx.gfx_ring[bit])
return true;
}
return false;
}
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
/* Policy: use 1st queue as high priority compute queue if we
 * have more than one compute queue.
 */
if (adev->gfx.num_compute_rings > 1 &&
ring == &adev->gfx.compute_ring[0])
return true;

return false;
}
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, j, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_compute_multipipe_capable(adev);
int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe,
adev->gfx.num_compute_rings);
int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1;

if (multipipe_policy) {
/* policy: make queues evenly cross all pipes on MEC1 only
 * for multiple xcc, just use the original policy for simplicity */
for (j = 0; j < num_xcc; j++) {
for (i = 0; i < max_queues_per_mec; i++) {
pipe = i % adev->gfx.mec.num_pipe_per_mec;
queue = (i / adev->gfx.mec.num_pipe_per_mec) %
adev->gfx.mec.num_queue_per_pipe;
set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
adev->gfx.mec_bitmap[j].queue_bitmap);
}
}
} else {
/* policy: amdgpu owns all queues in the given pipe */
for (j = 0; j < num_xcc; j++) {
for (i = 0; i < max_queues_per_mec; ++i)
set_bit(i, adev->gfx.mec_bitmap[j].queue_bitmap);
}
}
}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe;
bool multipipe_policy = amdgpu_gfx_is_graphics_multipipe_capable(adev);
int num_queue_per_pipe = 1; /* we only enable 1 KGQ per pipe */
int max_queues_per_me = adev->gfx.me.num_pipe_per_me * num_queue_per_pipe;

if (multipipe_policy) {
/* policy: amdgpu owns the first queue per pipe at this stage
 * will extend to multiple queues per pipe later */
for (i = 0; i < max_queues_per_me; i++) {
pipe = i % adev->gfx.me.num_pipe_per_me;
queue = (i / adev->gfx.me.num_pipe_per_me) %
num_queue_per_pipe;
set_bit(pipe * num_queue_per_pipe + queue,
adev->gfx.me.queue_bitmap);
}
} else {
for (i = 0; i < max_queues_per_me; ++i)
set_bit(i, adev->gfx.me.queue_bitmap);
}
/* update the number of active graphics rings */
if (adev->gfx.num_gfx_rings)
adev->gfx.num_gfx_rings =
bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}
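/*
 * Small example of the policy above (values assumed for illustration):
 * with num_pipe_per_me = 2 and one KGQ per pipe, the multipipe policy sets
 * bits 0 and 1 in me.queue_bitmap, so num_gfx_rings is trimmed to the
 * bitmap weight of 2.
 */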
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
struct amdgpu_ring *ring, int xcc_id)
{
int queue_bit;
int mec, pipe, queue;

/*
 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
 * only can be issued on queue 0.
 */
if ((mec == 1 && pipe > 1) || queue != 0)
continue;
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
unsigned int hpd_size, int xcc_id)
{
int r;
u32 *hpd;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
&kiq->eop_gpu_addr, (void **)&hpd);
if (r) {
dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
return r;
}
memset(hpd, 0, hpd_size);
r = amdgpu_bo_reserve(kiq->eop_obj, true);
if (unlikely(r != 0))
dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
amdgpu_bo_kunmap(kiq->eop_obj);
amdgpu_bo_unreserve(kiq->eop_obj);
return 0;
}
/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
unsigned int mqd_size, int xcc_id)
{
int r, i, j;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *ring = &kiq->ring;
u32 domain = AMDGPU_GEM_DOMAIN_GTT;

#if !defined(CONFIG_ARM) && !defined(CONFIG_ARM64)
/* Only enable on gfx10 and 11 for now to avoid changing behavior on older chips */
if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 0, 0))
domain |= AMDGPU_GEM_DOMAIN_VRAM;
#endif

/* create MQD for KIQ */
if (!adev->enable_mes_kiq && !ring->mqd_obj) {
/* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV the VRAM
 * domain is a must; otherwise the hypervisor triggers SAVE_VF failures after the
 * driver is unloaded, which means the MQD has been deallocated and gart_unbind
 * called. To avoid diverging between the two cases, use the VRAM domain for the
 * KIQ MQD on both SRIOV and bare-metal.
 */
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT,
&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
return r;
}
/* prepare MQD backup */
kiq->mqd_backup = kzalloc(mqd_size, GFP_KERNEL);
if (!kiq->mqd_backup) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
return -ENOMEM;
}
}
if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
/* create MQD for each KGQ */
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
domain, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
return r;
}
ring->mqd_size = mqd_size;
/* prepare MQD backup */
adev->gfx.me.mqd_backup[i] = kzalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.me.mqd_backup[i]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
return -ENOMEM;
}
}
}
}
/* create MQD for each KCQ */
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
ring = &adev->gfx.compute_ring[j];
if (!ring->mqd_obj) {
r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
domain, &ring->mqd_obj,
&ring->mqd_gpu_addr, &ring->mqd_ptr);
if (r) {
dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
return r;
}
ring->mqd_size = mqd_size;
/* prepare MQD backup */
adev->gfx.mec.mqd_backup[j] = kzalloc(mqd_size, GFP_KERNEL);
if (!adev->gfx.mec.mqd_backup[j]) {
dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
return -ENOMEM;
}
}
}
return 0;
}
void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_ring *ring = NULL;
int i, j;
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];

if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
kfree(adev->gfx.me.mqd_backup[i]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
ring = &adev->gfx.compute_ring[j];
kfree(adev->gfx.mec.mqd_backup[j]);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
ring = &kiq->ring;
kfree(kiq->mqd_backup);
amdgpu_bo_free_kernel(&ring->mqd_obj,
&ring->mqd_gpu_addr,
&ring->mqd_ptr);
}
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
int i, r = 0;
int j;

if (adev->enable_mes) {
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
amdgpu_mes_unmap_legacy_queue(adev,
&adev->gfx.compute_ring[j],
RESET_QUEUES, 0, 0);
}

return 0;
}
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;

if (!kiq_ring->sched.ready || amdgpu_in_reset(adev))
return 0;

spin_lock(&kiq->ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
adev->gfx.num_compute_rings)) {
spin_unlock(&kiq->ring_lock);
return -ENOMEM;
}

for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
kiq->pmf->kiq_unmap_queues(kiq_ring,
&adev->gfx.compute_ring[j],
RESET_QUEUES, 0, 0);
}

/* Submit unmap queue packet */
amdgpu_ring_commit(kiq_ring);
/*
 * Ring test will do a basic scratch register change check. Just run
 * this to ensure that the unmap queues packet submitted above got
 * processed successfully before returning.
 */
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
return r;
}
int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
int i, r = 0;
int j;

if (adev->enable_mes) {
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
amdgpu_mes_unmap_legacy_queue(adev,
&adev->gfx.gfx_ring[j],
PREEMPT_QUEUES, 0, 0);
}
}

return 0;
}
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;

if (!adev->gfx.kiq[0].ring.sched.ready || amdgpu_in_reset(adev))
return 0;
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
spin_lock(&kiq->ring_lock);
if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
adev->gfx.num_gfx_rings)) {
spin_unlock(&kiq->ring_lock);
return -ENOMEM;
}
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
kiq->pmf->kiq_unmap_queues(kiq_ring,
&adev->gfx.gfx_ring[j],
PREEMPT_QUEUES, 0, 0);
}

/* Submit unmap queue packet */
amdgpu_ring_commit(kiq_ring);
/*
 * Ring test will do a basic scratch register change check.
 * Just run this to ensure that the unmap queues packet submitted
 * above got processed successfully before returning.
 */
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
}
return r;
}
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev, int queue_bit)
{
int mec, pipe, queue;
int set_resource_bit = 0;

static int amdgpu_gfx_mes_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
uint64_t queue_mask = ~0ULL;
int r, i, j;

amdgpu_device_flush_hdp(adev, NULL);
if (!adev->enable_uni_mes) {
spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->set_resources_size);
if (r) {
dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
dev_err(adev->dev, "KIQ failed to set resources\n");
}
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
r = amdgpu_mes_map_legacy_queue(adev,
&adev->gfx.compute_ring[j]);
if (r) {
dev_err(adev->dev, "failed to map compute queue\n");
return r;
}
}
return 0;
}
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
uint64_t queue_mask = 0;
int r, i, j;

if (adev->mes.enable_legacy_queue_map)
return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);

if (!kiq->pmf || !kiq->pmf->kiq_map_queues ||
!kiq->pmf->kiq_set_resources)
return -EINVAL;

for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
if (!test_bit(i, adev->gfx.mec_bitmap[xcc_id].queue_bitmap))
continue;

/* This situation may be hit in the future if a new HW
 * generation exposes more than 64 queues. If so, the
 * definition of queue_mask needs updating */
if (WARN_ON(i > (sizeof(queue_mask)*8))) {
dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i);
break;
}

queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
}

spin_lock(&kiq->ring_lock);
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_compute_rings +
kiq->pmf->set_resources_size);
if (r) {
dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
j = i + xcc_id * adev->gfx.num_compute_rings;
kiq->pmf->kiq_map_queues(kiq_ring,
&adev->gfx.compute_ring[j]);
}

/* Submit map queue packet */
amdgpu_ring_commit(kiq_ring);
/*
 * Ring test will do a basic scratch register change check. Just run
 * this to ensure that the map queues packet submitted above got
 * processed successfully before returning.
 */
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
dev_err(adev->dev, "KCQ enable failed\n");
return r;
}
int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
{
struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
int r, i, j;

if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
return -EINVAL;

amdgpu_device_flush_hdp(adev, NULL);
if (adev->mes.enable_legacy_queue_map) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
r = amdgpu_mes_map_legacy_queue(adev,
&adev->gfx.gfx_ring[j]);
if (r) {
dev_err(adev->dev, "failed to map gfx queue\n");
return r;
}
}
return 0;
}
spin_lock(&kiq->ring_lock);
/* No need to map kcq on the slave */
if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
adev->gfx.num_gfx_rings);
if (r) {
dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r);
spin_unlock(&kiq->ring_lock);
return r;
}
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
kiq->pmf->kiq_map_queues(kiq_ring,
&adev->gfx.gfx_ring[j]);
}
}

/* Submit map queue packet */
amdgpu_ring_commit(kiq_ring);
/*
 * Ring test will do a basic scratch register change check. Just run
 * this to ensure that the map queues packet submitted above got
 * processed successfully before returning.
 */
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
dev_err(adev->dev, "KGQ enable failed\n");
return r;
}

static void amdgpu_gfx_do_off_ctrl(struct amdgpu_device *adev, bool enable,
bool no_delay)
{
unsigned long delay = GFX_OFF_DELAY_ENABLE;

if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return;

mutex_lock(&adev->gfx.gfx_off_mutex);
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
 * Note that the bug may be in a different caller than the one which triggers the
 * WARN_ON_ONCE.
 */
if (WARN_ON_ONCE(adev->gfx.gfx_off_req_count == 0))
goto unlock;

adev->gfx.gfx_off_req_count--;
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
/* If going to s2idle, no need to wait */
if (no_delay) {
if (!amdgpu_dpm_set_powergating_by_smu(adev,
AMD_IP_BLOCK_TYPE_GFX, true, 0))
adev->gfx.gfx_off_state = true;
} else {
schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
}
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {
cancel_delayed_work_sync(&adev->gfx.gfx_off_delay_work);

if (adev->gfx.gfx_off_state &&
!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false, 0)) {
adev->gfx.gfx_off_state = false;

if (adev->gfx.funcs->init_spm_golden) {
dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
amdgpu_gfx_init_spm_golden(adev);
}
}
}
adev->gfx.gfx_off_req_count++;
}
unlock:
mutex_unlock(&adev->gfx.gfx_off_mutex);
}
/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. gfx off feature will be enabled by gfx ip after gfx cg pg enabled.
 * 2. other clients can send requests to disable the gfx off feature; such requests should be honored.
 * 3. other clients can cancel their request to disable the gfx off feature.
 * 4. other clients should not send a request to enable the gfx off feature before disabling it.
 *
 * gfx off allow will be delayed by GFX_OFF_DELAY_ENABLE ms.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
/* If going to s2idle, no need to wait */
bool no_delay = adev->in_s0ix ? true : false;

amdgpu_gfx_do_off_ctrl(adev, enable, no_delay);
}
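/*
 * Typical usage sketch (not tied to any specific caller): a client that
 * needs the GFX block to stay powered, e.g. around register access,
 * brackets that access with amdgpu_gfx_off_ctrl(adev, false) and a matching
 * amdgpu_gfx_off_ctrl(adev, true); the request count above keeps GFXOFF
 * disallowed until every disable request has been cancelled.
 */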
/**
 * amdgpu_gfx_off_ctrl_immediate - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. gfx off feature will be enabled by gfx ip after gfx cg pg enabled.
 * 2. other clients can send requests to disable the gfx off feature; such requests should be honored.
 * 3. other clients can cancel their request to disable the gfx off feature.
 * 4. other clients should not send a request to enable the gfx off feature before disabling it.
 *
 * gfx off allow will be issued immediately.
 */
void amdgpu_gfx_off_ctrl_immediate(struct amdgpu_device *adev, bool enable)
{
amdgpu_gfx_do_off_ctrl(adev, enable, true);
}
int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value)
{
int r = 0;

mutex_lock(&adev->gfx.gfx_off_mutex);
r = amdgpu_dpm_set_residency_gfxoff(adev, value);
mutex_unlock(&adev->gfx.gfx_off_mutex);
return r;
}
int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *value)
{
int r = 0;

mutex_lock(&adev->gfx.gfx_off_mutex);
r = amdgpu_dpm_get_residency_gfxoff(adev, value);
mutex_unlock(&adev->gfx.gfx_off_mutex);
return r;
}
int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value)
{
int r = 0;

mutex_lock(&adev->gfx.gfx_off_mutex);
r = amdgpu_dpm_get_entrycount_gfxoff(adev, value);
mutex_unlock(&adev->gfx.gfx_off_mutex);
return r;
}
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
int r = 0;
mutex_lock(&adev->gfx.gfx_off_mutex);
r = amdgpu_dpm_get_status_gfxoff(adev, value);
mutex_unlock(&adev->gfx.gfx_off_mutex);
return r;
}
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
{
int r;

if (amdgpu_ras_is_supported(adev, ras_block->block)) {
if (!amdgpu_persistent_edc_harvesting_supported(adev)) {
r = amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
if (r)
return r;
}
r = amdgpu_ras_block_late_init(adev, ras_block);
if (r)
return r;

if (amdgpu_sriov_vf(adev))
return r;

if (adev->gfx.cp_ecc_error_irq.funcs) {
r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
if (r)
goto late_fini;
}
} else {
amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
}
/* If no special ras_late_init function is defined, use the gfx default ras_late_init */
if (!ras->ras_block.ras_late_init)
ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init;

/* If no special ras_cb function is defined, use the default ras_cb */
if (!ras->ras_block.ras_cb)
ras->ras_block.ras_cb = amdgpu_gfx_process_ras_data_cb;
return 0;
}
int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry)
{
if (adev->gfx.ras && adev->gfx.ras->poison_consumption_handler)
return adev->gfx.ras->poison_consumption_handler(adev, entry);

return 0;
}
int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry)
{
/* TODO ue will trigger an interrupt.
 *
 * When "Full RAS" is enabled, the per-IP interrupt sources should
 * be disabled and the driver should only look for the aggregated
 * interrupt via sync flood
 */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops &&
adev->gfx.ras->ras_block.hw_ops->query_ras_error_count)
adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data);
amdgpu_ras_reset_gpu(adev);
}

return AMDGPU_RAS_SUCCESS;
}
if (adev->mes.ring[0].sched.ready)
return amdgpu_mes_rreg(adev, reg);

BUG_ON(!ring->funcs->emit_rreg);

spin_lock_irqsave(&kiq->ring_lock, flags);
if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
pr_err("critical bug! too many kiq readers\n");
goto failed_unlock;
}
r = amdgpu_ring_alloc(ring, 32);
if (r)
goto failed_unlock;
amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
if (r)
goto failed_undo;
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
/* don't wait anymore for gpu reset case because this way may
 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
 * never return if we keep waiting in virt_kiq_rreg, which causes
 * gpu_recover() to hang there.
 *
 * also don't wait anymore for IRQ context
 */
if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
goto failed_kiq_read;

might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
if (cnt > MAX_KIQ_REG_TRY)
goto failed_kiq_read;
mb();
value = adev->wb.wb[reg_val_offs];
amdgpu_device_wb_free(adev, reg_val_offs);
return value;
failed_undo:
amdgpu_ring_undo(ring);
failed_unlock:
spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
if (reg_val_offs)
amdgpu_device_wb_free(adev, reg_val_offs);
dev_err(adev->dev, "failed to read reg:%x\n", reg); return ~0;
}
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
/* don't wait anymore for gpu reset case because this way may
 * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
 * never return if we keep waiting in virt_kiq_rreg, which causes
 * gpu_recover() to hang there.
 *
 * also don't wait anymore for IRQ context
 */
if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
goto failed_kiq_write;

might_sleep();
while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
}
/* Initialize the scheduler entity */
r = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
&sched, 1, NULL);
if (r) {
dev_err(adev->dev, "Failed setting up GFX kernel entity.\n");
goto err;
}
/*
 * Use some unique dummy value as the owner to make sure we execute
 * the cleaner shader on each submission. The value just needs to change
 * for each submission and is otherwise meaningless.
 */
owner = (void *)(unsigned long)atomic_inc_return(&counter);

r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner,
64, 0, &job);
if (r)
goto err;

job->enforce_isolation = true;
/* always run the cleaner shader */
job->run_cleaner_shader = true;

ib = &job->ibs[0];
for (i = 0; i <= ring->funcs->align_mask; ++i)
ib->ptr[i] = ring->funcs->nop;
ib->length_dw = ring->funcs->align_mask + 1;
f = amdgpu_job_submit(job);
r = dma_fence_wait(f, false);
if (r)
goto err;
dma_fence_put(f);
/* Clean up the scheduler entity */
drm_sched_entity_destroy(&entity);

return 0;

err:
return r;
}
static int amdgpu_gfx_run_cleaner_shader(struct amdgpu_device *adev, int xcp_id)
{
int num_xcc = NUM_XCC(adev->gfx.xcc_mask);
struct amdgpu_ring *ring;
int num_xcc_to_clear;
int i, r, xcc_id;

if (adev->gfx.num_xcc_per_xcp)
num_xcc_to_clear = adev->gfx.num_xcc_per_xcp; else
num_xcc_to_clear = 1;
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i + xcc_id * adev->gfx.num_compute_rings];
if ((ring->xcp_id == xcp_id) && ring->sched.ready) {
r = amdgpu_gfx_run_cleaner_shader_job(ring);
if (r)
return r;
num_xcc_to_clear--;
break;
}
}
}
if (num_xcc_to_clear)
return -ENOENT;

return 0;
}
/**
 * amdgpu_gfx_set_run_cleaner_shader - Execute the AMDGPU GFX Cleaner Shader
 * @dev: The device structure
 * @attr: The device attribute structure
 * @buf: The buffer containing the input data
 * @count: The size of the input data
 *
 * Provides the sysfs interface to manually run a cleaner shader, which is
 * used to clear the GPU state between different tasks. Writing a value to the
 * 'run_cleaner_shader' sysfs file triggers the cleaner shader execution.
 * The value written corresponds to the partition index on multi-partition
 * devices. On single-partition devices, the value should be '0'.
 *
 * The cleaner shader clears the Local Data Store (LDS) and General Purpose
 * Registers (GPRs) to ensure data isolation between GPU workloads.
 *
 * Return: The number of bytes written to the sysfs file.
 */
static ssize_t amdgpu_gfx_set_run_cleaner_shader(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int ret;
long value;

if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;

if (adev->gfx.disable_kq)
return -EPERM;

ret = kstrtol(buf, 0, &value);
if (ret)
return -EINVAL;

if (value < 0)
return -EINVAL;

if (adev->xcp_mgr) {
if (value >= adev->xcp_mgr->num_xcps)
return -EINVAL;
} else {
if (value > 1)
return -EINVAL;
}
ret = pm_runtime_get_sync(ddev->dev);
if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
return ret;
}
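/*
 * Example usage from user space (illustrative; the exact sysfs path depends
 * on the card index): writing the partition index triggers the cleaner
 * shader on that partition, e.g.
 *
 *   echo 0 > /sys/class/drm/card0/device/run_cleaner_shader
 */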
/**
 * amdgpu_gfx_get_enforce_isolation - Query AMDGPU GFX Enforce Isolation Settings
 * @dev: The device structure
 * @attr: The device attribute structure
 * @buf: The buffer to store the output data
 *
 * Provides the sysfs read interface to get the current settings of the
 * 'enforce_isolation' feature for each GPU partition. Reading from the
 * 'enforce_isolation' sysfs file returns the isolation settings for all
 * partitions, where '0' indicates disabled, '1' indicates enabled, '2'
 * indicates enabled in legacy mode, and '3' indicates enabled without the
 * cleaner shader.
 *
 * Return: The number of bytes read from the sysfs file.
 */
static ssize_t amdgpu_gfx_get_enforce_isolation(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
int i;
ssize_t size = 0;
if (adev->xcp_mgr) {
for (i = 0; i < adev->xcp_mgr->num_xcps; i++) {
size += sysfs_emit_at(buf, size, "%u", adev->enforce_isolation[i]);
if (i < (adev->xcp_mgr->num_xcps - 1))
size += sysfs_emit_at(buf, size, " ");
}
buf[size++] = '\n';
} else {
size = sysfs_emit_at(buf, 0, "%u\n", adev->enforce_isolation[0]);
}
return size;
}
/**
 * amdgpu_gfx_set_enforce_isolation - Control AMDGPU GFX Enforce Isolation
 * @dev: The device structure
 * @attr: The device attribute structure
 * @buf: The buffer containing the input data
 * @count: The size of the input data
 *
 * This function allows control over the 'enforce_isolation' feature, which
 * serializes access to the graphics engine. Writing to the
 * 'enforce_isolation' sysfs file sets the isolation mode for each partition:
 * '0' disables isolation, '1' enables isolation with the cleaner shader,
 * '2' enables legacy isolation without the cleaner shader, and '3' enables
 * process isolation without submitting the cleaner shader. The input should
 * specify the setting for all partitions.
 *
 * Return: The number of bytes written to the sysfs file.
 */
static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
long partition_values[MAX_XCP] = {0};
int ret, i, num_partitions;
const char *input_buf = buf;

for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
ret = sscanf(input_buf, "%ld", &partition_values[i]);
if (ret <= 0)
break;

/* Move the pointer to the next value in the string */
input_buf = strchr(input_buf, ' ');
if (input_buf) {
input_buf++;
} else {
i++;
break;
}
}
num_partitions = i;
if (adev->xcp_mgr && num_partitions != adev->xcp_mgr->num_xcps)
return -EINVAL;

if (!adev->xcp_mgr && num_partitions != 1)
return -EINVAL;

for (i = 0; i < num_partitions; i++) {
if (partition_values[i] != 0 &&
partition_values[i] != 1 &&
partition_values[i] != 2 &&
partition_values[i] != 3)
return -EINVAL;
}
mutex_lock(&adev->enforce_isolation_mutex);
for (i = 0; i < num_partitions; i++) {
switch (partition_values[i]) {
case 0:
default:
adev->enforce_isolation[i] = AMDGPU_ENFORCE_ISOLATION_DISABLE;
break;
case 1:
adev->enforce_isolation[i] =
AMDGPU_ENFORCE_ISOLATION_ENABLE;
break;
case 2:
adev->enforce_isolation[i] =
AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY;
break;
case 3:
adev->enforce_isolation[i] =
AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER;
break;
}
}
mutex_unlock(&adev->enforce_isolation_mutex);

return count;
}
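/*
 * Example from user space (illustrative; the sysfs path depends on the card
 * index): on a device with four partitions,
 *
 *   cat /sys/class/drm/card0/device/enforce_isolation    ->  "0 0 0 0"
 *   echo "1 1 1 1" > /sys/class/drm/card0/device/enforce_isolation
 *
 * enables isolation with the cleaner shader on all four partitions.
 */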
if (xcp_switch_supported)
device_remove_file(adev->dev,
&dev_attr_available_compute_partition);
}
static int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
{
int r;

r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
if (r)
return r;
if (adev->gfx.enable_cleaner_shader)
r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);

return r;
}
if (adev->gfx.cleaner_shader_cpu_ptr && cleaner_shader_ptr)
memcpy_toio(adev->gfx.cleaner_shader_cpu_ptr, cleaner_shader_ptr,
cleaner_shader_size);
}
/**
 * amdgpu_gfx_kfd_sch_ctrl - Control the KFD scheduler from the KGD (Graphics Driver)
 * @adev: amdgpu_device pointer
 * @idx: Index of the scheduler to control
 * @enable: Whether to enable or disable the KFD scheduler
 *
 * This function is used to control the KFD (Kernel Fusion Driver) scheduler
 * from the KGD. It is part of the cleaner shader feature. This function plays
 * a key role in enforcing process isolation on the GPU.
 *
 * The function uses a reference count mechanism (kfd_sch_req_count) to keep
 * track of the number of requests to enable the KFD scheduler. When a request
 * to enable the KFD scheduler is made, the reference count is decremented.
 * When the reference count reaches zero, a delayed work is scheduled to
 * enforce isolation after a delay of GFX_SLICE_PERIOD.
 *
 * When a request to disable the KFD scheduler is made, the function first
 * checks if the reference count is zero. If it is, it cancels the delayed work
 * for enforcing isolation and checks if the KFD scheduler is active. If the
 * KFD scheduler is active, it sends a request to stop the KFD scheduler and
 * sets the KFD scheduler state to inactive. Then, it increments the reference
 * count.
 *
 * The function is synchronized using the kfd_sch_mutex to ensure that the KFD
 * scheduler state and reference count are updated atomically.
 *
 * Note: If the reference count is already zero when a request to enable the
 * KFD scheduler is made, it means there's an imbalance bug somewhere. The
 * function triggers a warning in this case.
 */
static void amdgpu_gfx_kfd_sch_ctrl(struct amdgpu_device *adev, u32 idx,
bool enable)
{
mutex_lock(&adev->gfx.userq_sch_mutex);
if (enable) {
/* If the count is already 0, it means there's an imbalance bug somewhere.
 * Note that the bug may be in a different caller than the one which triggers the
 * WARN_ON_ONCE.
 */
if (WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx] == 0)) {
dev_err(adev->dev, "Attempted to enable KFD scheduler when reference count is already zero\n");
goto unlock;
}
adev->gfx.userq_sch_req_count[idx]--;
if (adev->gfx.userq_sch_req_count[idx] == 0 &&
adev->gfx.userq_sch_inactive[idx]) {
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
msecs_to_jiffies(adev->gfx.enforce_isolation_time[idx]));
}
} else {
if (adev->gfx.userq_sch_req_count[idx] == 0) {
cancel_delayed_work_sync(&adev->gfx.enforce_isolation[idx].work);
if (!adev->gfx.userq_sch_inactive[idx]) {
amdgpu_userq_stop_sched_for_enforce_isolation(adev, idx);
if (adev->kfd.init_complete)
amdgpu_amdkfd_stop_sched(adev, idx);
adev->gfx.userq_sch_inactive[idx] = true;
}
}
adev->gfx.userq_sch_req_count[idx]++;
}
unlock:
mutex_unlock(&adev->gfx.userq_sch_mutex);
}

/**
 * amdgpu_gfx_enforce_isolation_handler - work handler for enforcing shader isolation
 *
 * @work: work_struct.
 *
 * This function is the work handler for enforcing shader isolation on AMD GPUs.
 * It counts the number of emitted fences for each GFX and compute ring. If there
 * are any fences, it schedules the `enforce_isolation_work` to be run after a
 * delay of `GFX_SLICE_PERIOD`. If there are no fences, it signals the Kernel Fusion
 * Driver (KFD) to resume the runqueue. The function is synchronized using the
 * `enforce_isolation_mutex`.
 */
void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work)
{
struct amdgpu_isolation_work *isolation_work =
container_of(work, struct amdgpu_isolation_work, work.work);
struct amdgpu_device *adev = isolation_work->adev;
u32 i, idx, fences = 0;
idx = isolation_work->xcp_id;

mutex_lock(&adev->enforce_isolation_mutex);
for (i = 0; i < AMDGPU_MAX_GFX_RINGS; ++i) {
if (isolation_work->xcp_id == adev->gfx.gfx_ring[i].xcp_id)
fences += amdgpu_fence_count_emitted(&adev->gfx.gfx_ring[i]);
}

for (i = 0; i < (AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES); ++i) {
if (isolation_work->xcp_id == adev->gfx.compute_ring[i].xcp_id)
fences += amdgpu_fence_count_emitted(&adev->gfx.compute_ring[i]);
}

if (fences) {
/* we've already had our timeslice, so let's wrap this up */
schedule_delayed_work(&adev->gfx.enforce_isolation[idx].work,
msecs_to_jiffies(1));
} else {
/* Tell KFD to resume the runqueue */
WARN_ON_ONCE(!adev->gfx.userq_sch_inactive[idx]);
WARN_ON_ONCE(adev->gfx.userq_sch_req_count[idx]);
/**
 * amdgpu_gfx_enforce_isolation_wait_for_kfd - Manage KFD wait period for process isolation
 * @adev: amdgpu_device pointer
 * @idx: Index of the GPU partition
 *
 * When kernel submissions come in, the jobs are given a time slice and once
 * that time slice is up, if there are KFD user queues active, kernel
 * submissions are blocked until KFD has had its time slice. Once the KFD time
 * slice is up, KFD user queues are preempted and kernel submissions are
 * unblocked and allowed to run again.
 */
static void
amdgpu_gfx_enforce_isolation_wait_for_kfd(struct amdgpu_device *adev,
u32 idx)
{
unsigned long cjiffies;
bool wait = false;

mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
/* set the initial values if nothing is set */
if (!adev->gfx.enforce_isolation_jiffies[idx]) {
adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
}

/* Make sure KFD gets a chance to run */
if (amdgpu_amdkfd_compute_active(adev, idx)) {
cjiffies = jiffies;
if (time_after(cjiffies, adev->gfx.enforce_isolation_jiffies[idx])) {
cjiffies -= adev->gfx.enforce_isolation_jiffies[idx];
if ((jiffies_to_msecs(cjiffies) >= GFX_SLICE_PERIOD_MS)) {
/* if our time is up, let KGD work drain before scheduling more */
wait = true;
/* reset the timer period */
adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
} else {
/* set the timer period to what's left in our time slice */
adev->gfx.enforce_isolation_time[idx] =
GFX_SLICE_PERIOD_MS - jiffies_to_msecs(cjiffies);
}
} else {
/* if jiffies wrap around we will just wait a little longer */
adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
}
} else {
/* if there is no KFD work, then set the full slice period */
adev->gfx.enforce_isolation_jiffies[idx] = jiffies;
adev->gfx.enforce_isolation_time[idx] = GFX_SLICE_PERIOD_MS;
}
}
mutex_unlock(&adev->enforce_isolation_mutex);
if (wait)
msleep(GFX_SLICE_PERIOD_MS);
}
/**
 * amdgpu_gfx_enforce_isolation_ring_begin_use - Begin use of a ring with enforced isolation
 * @ring: Pointer to the amdgpu_ring structure
 *
 * Ring begin_use helper implementation for gfx which serializes access to the
 * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
 * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
 * each get a time slice when both are active.
 */
void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
bool sched_work = false;

/* Don't submit more work until KFD has had some time */
amdgpu_gfx_enforce_isolation_wait_for_kfd(adev, idx);
mutex_lock(&adev->enforce_isolation_mutex);
if (adev->enforce_isolation[idx] == AMDGPU_ENFORCE_ISOLATION_ENABLE) {
if (adev->kfd.init_complete)
sched_work = true;
}
mutex_unlock(&adev->enforce_isolation_mutex);
if (sched_work)
amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
}
/**
 * amdgpu_gfx_enforce_isolation_ring_end_use - End use of a ring with enforced isolation
 * @ring: Pointer to the amdgpu_ring structure
 *
 * Ring end_use helper implementation for gfx which serializes access to the
 * gfx IP between kernel submission IOCTLs and KFD user queues when isolation
 * enforcement is enabled. The kernel submission IOCTLs and KFD user queues
 * each get a time slice when both are active.
 */
void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
u32 idx;
bool sched_work = false;

if (amdgpu_dpm_is_overdrive_enabled(adev))
return;

if (adev->gfx.num_gfx_rings)
profile = PP_SMC_POWER_PROFILE_FULLSCREEN3D; else
profile = PP_SMC_POWER_PROFILE_COMPUTE;
atomic_inc(&adev->gfx.total_submission_cnt);
cancel_delayed_work_sync(&adev->gfx.idle_work);
/* We can safely return early here because we've cancelled the
 * delayed work so there is no one else to set it to false
 * and we don't care if someone else sets it to true.
 */
if (adev->gfx.workload_profile_active)
return;

mutex_lock(&adev->gfx.workload_profile_mutex);
if (!adev->gfx.workload_profile_active) {
r = amdgpu_dpm_switch_power_profile(adev, profile, true);
if (r)
dev_warn(adev->dev, "(%d) failed to disable %s power profile mode\n", r,
profile == PP_SMC_POWER_PROFILE_FULLSCREEN3D ? "fullscreen 3D" : "compute");
adev->gfx.workload_profile_active = true;
}
mutex_unlock(&adev->gfx.workload_profile_mutex);
}