/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: monk liu <monk.liu@amd.com>
*/
/**
 * amdgpu_ctx_priority_is_valid - validate a userspace context priority
 * @ctx_prio: priority value supplied through the AMDGPU_CTX ioctl
 *
 * Return: true for the five defined AMDGPU_CTX_PRIORITY_* levels,
 * false for AMDGPU_CTX_PRIORITY_UNSET and any other value.
 */
bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
{
	switch (ctx_prio) {
	case AMDGPU_CTX_PRIORITY_VERY_LOW:
	case AMDGPU_CTX_PRIORITY_LOW:
	case AMDGPU_CTX_PRIORITY_NORMAL:
	case AMDGPU_CTX_PRIORITY_HIGH:
	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
		return true;
	default:
	case AMDGPU_CTX_PRIORITY_UNSET:
		/* UNSET priority is not valid and we don't carry that
		 * around, but set it to NORMAL in the only place this
		 * function is called, amdgpu_ctx_ioctl().
		 */
		return false;
	}
}
staticenum drm_sched_priority
amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
{ switch (ctx_prio) { case AMDGPU_CTX_PRIORITY_UNSET:
pr_warn_once("AMD-->DRM context priority value UNSET-->NORMAL"); return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_VERY_LOW: return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_LOW: return DRM_SCHED_PRIORITY_LOW;
case AMDGPU_CTX_PRIORITY_NORMAL: return DRM_SCHED_PRIORITY_NORMAL;
case AMDGPU_CTX_PRIORITY_HIGH: return DRM_SCHED_PRIORITY_HIGH;
case AMDGPU_CTX_PRIORITY_VERY_HIGH: return DRM_SCHED_PRIORITY_HIGH;
/* This should not happen as we sanitized userspace provided priority * already, WARN if this happens.
*/ default:
WARN(1, "Invalid context priority %d\n", ctx_prio); return DRM_SCHED_PRIORITY_NORMAL;
}
}
/*
 * Check whether the caller may create a context with the requested
 * priority.  NORMAL and below are open to everyone; elevated priorities
 * require CAP_SYS_NICE or DRM master status.
 *
 * Return: 0 if permitted, -EACCES otherwise.
 */
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
				      int32_t priority)
{
	/* NORMAL and below are accessible by everyone */
	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
		return 0;

	if (capable(CAP_SYS_NICE))
		return 0;

	if (drm_is_current_master(filp))
		return 0;

	return -EACCES;
}
staticenum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_gfx_pipe_prio(int32_t prio)
{ switch (prio) { case AMDGPU_CTX_PRIORITY_HIGH: case AMDGPU_CTX_PRIORITY_VERY_HIGH: return AMDGPU_GFX_PIPE_PRIO_HIGH; default: return AMDGPU_GFX_PIPE_PRIO_NORMAL;
}
}
/* Calculate the time spend on the hw */ static ktime_t amdgpu_ctx_fence_time(struct dma_fence *fence)
{ struct drm_sched_fence *s_fence;
/* A NULL fence has accumulated no hardware time. */
if (!fence) return ns_to_ktime(0);
/* When the fence is not even scheduled it can't have spend time */
s_fence = to_drm_sched_fence(fence); if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->scheduled.flags)) return ns_to_ktime(0);
/* When it is still running account how much already spend */ if (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &s_fence->finished.flags)) return ktime_sub(ktime_get(), s_fence->scheduled.timestamp);
/* NOTE(review): this chunk appears truncated here.  The final return of
 * amdgpu_ctx_fence_time() and the head of a second function (which must
 * declare ctx, centity, res and i used below — presumably a per-entity
 * time-accumulation helper) are missing.  Restore from the upstream
 * driver source; do not compile as-is.
 */
spin_lock(&ctx->ring_lock); for (i = 0; i < amdgpu_sched_jobs; i++) {
res = ktime_add(res, amdgpu_ctx_fence_time(centity->fences[i]));
}
spin_unlock(&ctx->ring_lock); return res;
}
/* NOTE(review): orphaned tail of a context-teardown function whose head is
 * missing from this chunk (it references adev, ctx and idx declared there).
 * It re-applies the context's stable pstate while the device is still
 * alive, then frees the context.  Restore the function head from the
 * upstream driver source.
 */
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
amdgpu_ctx_set_stable_pstate(ctx, ctx->stable_pstate);
drm_dev_exit(idx);
}
kfree(ctx);
}
/* Look up (or create) the scheduler entity for the given HW IP / instance /
 * ring of a context.  Validates hw_ip against AMDGPU_HW_IP_NUM and rejects
 * any non-zero instance.
 *
 * NOTE(review): the body is truncated in this chunk — it ends after the
 * argument validation and the next lines belong to a different function.
 * Restore the remainder from the upstream driver source.
 */
int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
u32 ring, struct drm_sched_entity **entity)
{ int r; struct drm_sched_entity *ctx_entity;
if (hw_ip >= AMDGPU_HW_IP_NUM) {
DRM_ERROR("unknown HW IP type: %d\n", hw_ip); return -EINVAL;
}
/* Right now all IPs have only one instance - multiple rings. */ if (instance != 0) {
DRM_DEBUG("invalid ip instance: %d\n", instance); return -EINVAL;
}
/* NOTE(review): mid-function fragment — the enclosing function's head is
 * missing from this chunk (it declares out, adev, ctx and reset_counter).
 * Presumably the context-state query path: it reports whether a GPU reset
 * occurred since the caller last asked, by comparing a cached reset counter
 * against the device's current one.  Restore the head from upstream.
 */
/* TODO: these two are always zero */
out->state.flags = 0x0;
out->state.hangs = 0x0;
/* determine if a GPU reset has occured since the last call */
reset_counter = atomic_read(&adev->gpu_reset_counter); /* TODO: this should ideally return NO, GUILTY, or INNOCENT. */ if (ctx->reset_counter_query == reset_counter)
out->state.reset_status = AMDGPU_CTX_NO_RESET; else
out->state.reset_status = AMDGPU_CTX_UNKNOWN_RESET;
ctx->reset_counter_query = reset_counter;
/* NOTE(review): mid-function fragment of the context ioctl dispatcher —
 * the function head (declaring args, adev, fpriv, filp, id, priority, r,
 * stable_pstate) and the trailing return are missing from this chunk.
 * It sanitizes the userspace priority, then dispatches on the ioctl op.
 * Restore head and tail from the upstream driver source.
 */
id = args->in.ctx_id;
priority = args->in.priority;
/* For backwards compatibility, we need to accept ioctls with garbage
 * in the priority field. Garbage values in the priority field, result
 * in the priority being set to NORMAL.
 */ if (!amdgpu_ctx_priority_is_valid(priority))
priority = AMDGPU_CTX_PRIORITY_NORMAL;
switch (args->in.op) { case AMDGPU_CTX_OP_ALLOC_CTX: if (args->in.flags) return -EINVAL;
r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
args->out.alloc.ctx_id = id; break; case AMDGPU_CTX_OP_FREE_CTX: if (args->in.flags) return -EINVAL;
r = amdgpu_ctx_free(fpriv, id); break; case AMDGPU_CTX_OP_QUERY_STATE: if (args->in.flags) return -EINVAL;
r = amdgpu_ctx_query(adev, fpriv, id, &args->out); break; case AMDGPU_CTX_OP_QUERY_STATE2: if (args->in.flags) return -EINVAL;
r = amdgpu_ctx_query2(adev, fpriv, id, &args->out); break; case AMDGPU_CTX_OP_GET_STABLE_PSTATE: if (args->in.flags) return -EINVAL;
r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate); if (!r)
args->out.pstate.flags = stable_pstate; break; case AMDGPU_CTX_OP_SET_STABLE_PSTATE: if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK) return -EINVAL;
stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK; if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK) return -EINVAL;
r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate); break; default: return -EINVAL;
}
/* NOTE(review): fragment of a usage-accounting function — the head
 * (declaring mgr, usage, ctx, id, hw_ip, i) is missing, and the body is
 * cut off mid-loop at the end of this chunk.  It sums per-HW-IP time
 * already recorded in mgr->time_spend, then walks live contexts to add
 * in-flight fence time.  Restore from the upstream driver source.
 */
/* * This is a little bit racy because it can be that a ctx or a fence are * destroyed just in the moment we try to account them. But that is ok * since exactly that case is explicitely allowed by the interface.
 */
mutex_lock(&mgr->lock); for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) {
uint64_t ns = atomic64_read(&mgr->time_spend[hw_ip]);
usage[hw_ip] = ns_to_ktime(ns);
}
idr_for_each_entry(&mgr->ctx_handles, ctx, id) { for (hw_ip = 0; hw_ip < AMDGPU_HW_IP_NUM; ++hw_ip) { for (i = 0; i < amdgpu_ctx_num_entities[hw_ip]; ++i) { struct amdgpu_ctx_entity *centity;
ktime_t spend;
/*
 * NOTE(review): the following German website-disclaimer text was appended
 * by the extraction tool and is not part of the driver source; wrapped in a
 * comment so it no longer reads as code. English translation:
 * "The information on this web page was carefully compiled to the best of
 * our knowledge. However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */