/* * Copyright 2014-2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE.
*/ #include"amdgpu.h" #include"amdgpu_amdkfd.h" #include"gc/gc_9_0_offset.h" #include"gc/gc_9_0_sh_mask.h" #include"vega10_enum.h" #include"sdma0/sdma0_4_0_offset.h" #include"sdma0/sdma0_4_0_sh_mask.h" #include"sdma1/sdma1_4_0_offset.h" #include"sdma1/sdma1_4_0_sh_mask.h" #include"athub/athub_1_0_offset.h" #include"athub/athub_1_0_sh_mask.h" #include"oss/osssys_4_0_offset.h" #include"oss/osssys_4_0_sh_mask.h" #include"soc15_common.h" #include"v9_structs.h" #include"soc15.h" #include"soc15d.h" #include"gfx_v9_0.h" #include"amdgpu_amdkfd_gfx_v9.h" #include <uapi/linux/kfd_ioctl.h>
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_CONFIG, sh_mem_config);
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmSH_MEM_BASES, sh_mem_bases); /* APE1 no longer exists on GFX9 */
kgd_gfx_v9_unlock_srbm(adev, inst);
}
int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsignedint vmid, uint32_t inst)
{ /* * We have to assume that there is no outstanding mapping. * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because * a mapping is in progress or because a mapping finished * and the SW cleared it. * So the protocol is to always wait & clear.
*/
uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
ATC_VMID0_PASID_MAPPING__VALID_MASK;
/* * need to do this twice, once for gfx and once for mmhub * for ATC add 16 to VMID for mmhub, for IH different registers. * ATC_VMID0..15 registers are separate from ATC_VMID16..31.
*/
/* Activate doorbell logic before triggering WPTR poll. */
data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
WREG32_SOC15_RLC(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL, data);
if (wptr) { /* Don't read wptr with get_user because the user * context may not be accessible (if this function * runs in a work queue). Instead trigger a one-shot * polling read from memory in the CP. This assumes * that wptr is GPU-accessible in the queue's VMID via * ATC or SVM. WPTR==RPTR before starting the poll so * the CP starts fetching new commands from the right * place. * * Guessing a 64-bit WPTR from a 32-bit RPTR is a bit * tricky. Assume that the queue didn't overflow. The * number of valid bits in the 32-bit RPTR depends on * the queue size. The remaining bits are taken from * the saved 64-bit WPTR. If the WPTR wrapped, add the * queue size.
*/
uint32_t queue_size =
2 << REG_GET_FIELD(m->cp_hqd_pq_control,
CP_HQD_PQ_CONTROL, QUEUE_SIZE);
uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);
if (m->cp_hqd_vmid == 0)
WREG32_FIELD15_RLC(GC, GET_INST(GC, inst), RLC_CP_SCHEDULERS, scheduler1, 0);
switch (reset_type) { case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
type = DRAIN_PIPE; break; case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES; break; case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
type = SAVE_WAVES; break; default:
type = DRAIN_PIPE; break;
}
/*
 * GFX9 helper for wave launch stall requirements on debug trap setting.
 *
 * vmid:
 *   Target VMID to stall/unstall.
 *
 * stall:
 *   0-unstall wave launch (enable), 1-stall wave launch (disable).
 *   After wavefront launch has been stalled, allocated waves must drain from
 *   SPI in order for debug trap settings to take effect on those waves.
 *   This is roughly a ~96 clock cycle wait on SPI where a read on
 *   SPI_GDBG_WAVE_CNTL translates to ~32 clock cycles.
 *   KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY indicates the number of reads
 *   required.
 *
 * NOTE: We can afford to clear the entire STALL_VMID field on unstall
 * because GFX9.4.1 cannot support multi-process debugging due to trap
 * configuration and masking being limited to global scope. Always assume
 * single process conditions.
 */
#define KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY	3
void kgd_gfx_v9_set_wave_launch_stall(struct amdgpu_device *adev,
					uint32_t vmid, bool stall)
{
	int i;
	/* Snapshot the current wave-launch control register. */
	uint32_t data = RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));

	/*
	 * NOTE(review): as visible here, 'data' is read but never modified or
	 * written back, and the 'vmid'/'stall' parameters are unused — the
	 * REG_SET_FIELD(STALL_VMID/STALL_RA) update and WREG32 write described
	 * in the header comment appear to be missing from this excerpt.
	 * Verify against the upstream amdgpu_amdkfd_gfx_v9.c before relying
	 * on this body.
	 */
	/* Dummy reads to burn SPI cycles so stalled waves drain (see header). */
	for (i = 0; i < KGD_GFX_V9_WAVE_LAUNCH_SPI_DRAIN_LATENCY; i++)
		RREG32(SOC15_REG_OFFSET(GC, 0, mmSPI_GDBG_WAVE_CNTL));
}
/* * restore_dbg_registers is ignored here but is a general interface requirement * for devices that support GFXOFF and where the RLC save/restore list * does not support hw registers for debugging i.e. the driver has to manually * initialize the debug mode registers after it has disabled GFX off during the * debug session.
*/
uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev, bool restore_dbg_registers,
uint32_t vmid)
{
mutex_lock(&adev->grbm_idx_mutex);
/* * keep_trap_enabled is ignored here but is a general interface requirement * for devices that support multi-process debugging where the performance * overhead from trap temporary setup needs to be bypassed when the debug * session has ended.
*/
uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled,
uint32_t vmid)
{
mutex_lock(&adev->grbm_idx_mutex);
/* The SPI_GDBG_TRAP_MASK register is global and affects all * processes. Only allow OR-ing the address-watch bit, since * this only affects processes under the debugger. Other bits * should stay 0 to avoid the debugger interfering with other * processes.
*/ if (trap_override != KFD_DBG_TRAP_OVERRIDE_OR) return -EINVAL;
/**
 * get_wave_count: Read device registers to get number of waves in flight for
 * a particular queue. The method also returns the doorbell offset associated
 * with the queue, so the caller can later match the queue to a process.
 *
 * @adev: Handle of device whose registers are to be read
 * @queue_idx: Index of queue in the queue-map bit-field
 * @queue_cnt: Stores the wave count and doorbell offset for an active queue
 * @inst: xcc's instance number on a multi-XCC setup
 *
 * NOTE: Caller is expected to have selected the target shader engine via
 * GRBM beforehand and to restore GRBM/srbm state afterwards.
 */
static void get_wave_count(struct amdgpu_device *adev, int queue_idx,
		struct kfd_cu_occupancy *queue_cnt, uint32_t inst)
{
	int pipe_idx;
	int queue_slot;
	unsigned int reg_val;
	unsigned int wave_cnt;

	/*
	 * Program GRBM with appropriate MEID, PIPEID, QUEUEID and VMID
	 * parameters to read out waves in flight. Get VMID if there are
	 * non-zero waves in flight.
	 */
	/* Map the flat queue index onto (pipe, queue-in-pipe). */
	pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe;
	queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe;
	/* MEID 1 selects the compute micro-engine (MEC). */
	soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, GET_INST(GC, inst));
	/* SPI_CSQ_WF_ACTIVE_COUNT_0..7 are consecutive; index by queue slot. */
	reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, GET_INST(GC, inst),
			mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot);
	wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK;
	if (wave_cnt != 0) {
		/* Record the doorbell offset so the caller can attribute the
		 * waves to the owning process's queue.
		 */
		queue_cnt->wave_cnt += wave_cnt;
		queue_cnt->doorbell_off =
			(RREG32_SOC15(GC, GET_INST(GC, inst), mmCP_HQD_PQ_DOORBELL_CONTROL) &
			 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK) >>
			 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	}
}
/** * kgd_gfx_v9_get_cu_occupancy: Reads relevant registers associated with each * shader engine and aggregates the number of waves that are in flight for the * process whose pasid is provided as a parameter. The process could have ZERO * or more queues running and submitting waves to compute units. * * @adev: Handle of device from which to get number of waves in flight * @cu_occupancy: Array that gets filled with wave_cnt and doorbell offset * for comparison later. * @max_waves_per_cu: Output parameter updated with maximum number of waves * possible per Compute Unit * @inst: xcc's instance number on a multi-XCC setup * * Note: It's possible that the device has too many queues (oversubscription) * in which case a VMID could be remapped to a different PASID. This could lead * to an inaccurate wave count. Following is a high-level sequence: * Time T1: vmid = getVmid(); vmid is associated with Pasid P1 * Time T2: passId = getPasId(vmid); vmid is associated with Pasid P2 * In the sequence above wave count obtained from time T1 will be incorrectly * lost or added to total wave count. * * The registers that provide the waves in flight are: * * SPI_CSQ_WF_ACTIVE_STATUS - bit-map of queues per pipe. The bit is ON if a * queue is slotted, OFF if there is no queue. A process could have ZERO or * more queues slotted and submitting waves to be run on compute units. Even * when there is a queue it is possible there could be zero wave fronts, this * can happen when queue is waiting on top-of-pipe events - e.g. waitRegMem * command * * For each bit that is ON from above: * * Read (SPI_CSQ_WF_ACTIVE_COUNT_0 + queue_idx) register. It provides the * number of waves that are in flight for the queue at specified index. The * index ranges from 0 to 7. * * If non-zero waves are in flight, store the corresponding doorbell offset * of the queue, along with the wave count. * * Determine if the queue belongs to the process by comparing the doorbell * offset against the process's queues. 
If it matches, aggregate the wave * count for the process. * * Reading registers referenced above involves programming GRBM appropriately
*/ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, struct kfd_cu_occupancy *cu_occupancy, int *max_waves_per_cu, uint32_t inst)
{ int qidx; int se_idx; int se_cnt; int queue_map; int max_queue_cnt;
DECLARE_BITMAP(cp_queue_bitmap, AMDGPU_MAX_QUEUES);
/* * Iterate through the shader engines and arrays of the device * to get number of waves in flight
*/
bitmap_complement(cp_queue_bitmap, adev->gfx.mec_bitmap[0].queue_bitmap,
AMDGPU_MAX_QUEUES);
max_queue_cnt = adev->gfx.mec.num_pipe_per_mec *
adev->gfx.mec.num_queue_per_pipe;
se_cnt = adev->gfx.config.max_shader_engines; for (se_idx = 0; se_idx < se_cnt; se_idx++) {
amdgpu_gfx_select_se_sh(adev, se_idx, 0, 0xffffffff, inst);
queue_map = RREG32_SOC15(GC, GET_INST(GC, inst), mmSPI_CSQ_WF_ACTIVE_STATUS);
/* * Assumption: queue map encodes following schema: four * pipes per each micro-engine, with each pipe mapping * eight queues. This schema is true for GFX9 devices * and must be verified for newer device families
*/ for (qidx = 0; qidx < max_queue_cnt; qidx++) { /* Skip qeueus that are not associated with * compute functions
*/ if (!test_bit(qidx, cp_queue_bitmap)) continue;
if (!(queue_map & (1 << qidx))) continue;
/* Get number of waves in flight and aggregate them */
get_wave_count(adev, qidx, &cu_occupancy[qidx],
inst);
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.