/* * This file implements HW context support. On gen5+ a HW context consists of an * opaque GPU object which is referenced at times of context saves and restores. * With RC6 enabled, the context is also referenced as the GPU enters and exits * RC6 (the GPU has its own internal power context, except on gen5). Though * something like a context does exist for the media ring, the code only * supports contexts for the render ring. * * In software, there is a distinction between contexts created by the user, * and the default HW context. The default HW context is used by GPU clients * that do not request setup of their own hardware context. The default * context's state is never restored to help prevent programming errors. This * would happen if a client ran and piggy-backed off another client's GPU state. * The default context only exists to give the GPU some offset to load as the * current to invoke a save of the context we actually care about. In fact, the * code could likely be constructed, albeit in a more complicated fashion, to * never use the default context, though that limits the driver's ability to * swap out, and/or destroy other contexts. * * All other contexts are created as a request by the GPU client. These contexts * store GPU state, and thus allow GPU clients to not re-emit state (and * potentially query certain state) at any time. The kernel driver makes * certain that the appropriate commands are inserted. * * The context life cycle is semi-complicated in that context BOs may live * longer than the context itself because of the way the hardware, and object * tracking works. Below is a very crude representation of the state machine * describing the context life. 
* refcount pincount active * S0: initial state 0 0 0 * S1: context created 1 0 0 * S2: context is currently running 2 1 X * S3: GPU referenced, but not current 2 0 1 * S4: context is current, but destroyed 1 1 0 * S5: like S3, but destroyed 1 0 1 * * The most common (but not all) transitions: * S0->S1: client creates a context * S1->S2: client submits execbuf with context * S2->S3: another client submits execbuf with context * S3->S1: context object was retired * S3->S2: client submits another execbuf * S2->S4: context destroy called with current context * S3->S5->S0: destroy path * S4->S5->S0: destroy path on current context * * There are two confusing terms used above: * The "current context" means the context which is currently running on the * GPU. The GPU has loaded its state already and has stored away the gtt * offset of the BO. The GPU is not actively referencing the data at this * offset, but it will on the next context switch. The only way to avoid this * is to do a GPU reset. * * An "active context" is one which was previously the "current context" and is * on the active list waiting for the next context switch to occur. Until this * happens, the object must remain at the same gtt offset. It is therefore * possible to destroy a context, but it is still active. *
*/
if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) return -ENODEV;
if (priority > I915_CONTEXT_MAX_USER_PRIORITY ||
priority < I915_CONTEXT_MIN_USER_PRIORITY) return -EINVAL;
if (priority > I915_CONTEXT_DEFAULT_PRIORITY &&
!capable(CAP_SYS_NICE)) return -EPERM;
return 0;
}
/*
 * proto_context_close - free a proto-context and everything it owns
 * @i915: the device private
 * @pc: the proto-context to destroy; freed on return
 *
 * Releases, in order: the runtime-PM wakeref taken for protected content
 * (if any), the reference on the address space (if any), the per-engine
 * sibling arrays and the user-engine array, and finally @pc itself.
 */
static void proto_context_close(struct drm_i915_private *i915,
				struct i915_gem_proto_context *pc)
{
	int i;

	if (pc->pxp_wakeref)
		intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
	if (pc->vm)
		i915_vm_put(pc->vm);
	if (pc->user_engines) {
		/* Each user engine owns its own siblings array */
		for (i = 0; i < pc->num_user_engines; i++)
			kfree(pc->user_engines[i].siblings);
		kfree(pc->user_engines);
	}
	kfree(pc);
}
/*
 * proto_context_set_persistence - set/clear UCONTEXT_PERSISTENCE on a
 * proto-context
 * @i915: the device private
 * @pc: the proto-context to update
 * @persist: true to allow requests to outlive the context
 *
 * Returns 0 on success, -EINVAL if persistence is requested without
 * hangcheck enabled, or -ENODEV if non-persistence cannot be honoured
 * (no preemption capability or no per-engine reset).
 */
static int proto_context_set_persistence(struct drm_i915_private *i915,
					 struct i915_gem_proto_context *pc,
					 bool persist)
{
	if (persist) {
		/*
		 * Only contexts that are short-lived [that will expire or be
		 * reset] are allowed to survive past termination. We require
		 * hangcheck to ensure that the persistent requests are healthy.
		 */
		if (!i915->params.enable_hangcheck)
			return -EINVAL;

		pc->user_flags |= BIT(UCONTEXT_PERSISTENCE);
	} else {
		/* To cancel a context we use "preempt-to-idle" */
		if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
			return -ENODEV;

		/*
		 * If the cancel fails, we then need to reset, cleanly!
		 *
		 * If the per-engine reset fails, all hope is lost! We resort
		 * to a full GPU reset in that unlikely case, but realistically
		 * if the engine could not reset, the full reset does not fare
		 * much better. The damage has been done.
		 *
		 * However, if we cannot reset an engine by itself, we cannot
		 * cleanup a hanging persistent context without causing
		 * collateral damage, and we should not pretend we can by
		 * exposing the interface.
		 */
		if (!intel_has_reset_engine(to_gt(i915)))
			return -ENODEV;

		pc->user_flags &= ~BIT(UCONTEXT_PERSISTENCE);
	}

	return 0;
}
/*
 * proto_context_set_protected - request protected-content (PXP) usage for a
 * proto-context.
 *
 * NOTE(review): this fragment appears truncated — `ret` is assigned but never
 * returned, and no error/rollback path is visible. Verify against the
 * upstream definition before relying on this body.
 */
staticint proto_context_set_protected(struct drm_i915_private *i915, struct i915_gem_proto_context *pc, boolprotected)
{ int ret = 0;
/* * protected context usage requires the PXP session to be up, * which in turn requires the device to be active.
*/
/* Hold a runtime-PM wakeref; released later in proto_context_close() */
pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
/* Start the PXP session if it is not already running */
if (!intel_pxp_is_active(i915->pxp))
ret = intel_pxp_start(i915->pxp);
}
set.num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines); /* RING_MASK has no shift so we can use it directly here */ if (set.num_engines > I915_EXEC_RING_MASK + 1) return -EINVAL;
set.engines = kmalloc_array(set.num_engines, sizeof(*set.engines), GFP_KERNEL); if (!set.engines) return -ENOMEM;
for (n = 0; n < set.num_engines; n++) { struct i915_engine_class_instance ci; struct intel_engine_cs *engine;
if (copy_from_user(&ci, &user->engines[n], sizeof(ci))) {
kfree(set.engines); return -EFAULT;
}
if (args->size < sizeof(user_sseu)) return -EINVAL;
if (GRAPHICS_VER(i915) != 11) return -ENODEV;
if (copy_from_user(&user_sseu, u64_to_user_ptr(args->value), sizeof(user_sseu))) return -EFAULT;
if (user_sseu.rsvd) return -EINVAL;
if (user_sseu.flags & ~(I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX)) return -EINVAL;
if (!!(user_sseu.flags & I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX) != (pc->num_user_engines >= 0)) return -EINVAL;
if (pc->num_user_engines >= 0) { int idx = user_sseu.engine.engine_instance; struct i915_gem_proto_engine *pe;
if (idx >= pc->num_user_engines) return -EINVAL;
idx = array_index_nospec(idx, pc->num_user_engines);
pe = &pc->user_engines[idx];
/* Only render engine supports RPCS configuration. */ if (pe->engine->class != RENDER_CLASS) return -EINVAL;
sseu = &pe->sseu;
} else { /* Only render engine supports RPCS configuration. */ if (user_sseu.engine.engine_class != I915_ENGINE_CLASS_RENDER) return -EINVAL;
/* There is only one render engine */ if (user_sseu.engine.engine_instance != 0) return -EINVAL;
sseu = &pc->legacy_rcs_sseu;
}
ret = i915_gem_user_to_context_sseu(to_gt(i915), &user_sseu, sseu); if (ret) return ret;
args->size = sizeof(user_sseu);
return 0;
}
staticint set_proto_ctx_param(struct drm_i915_file_private *fpriv, struct i915_gem_proto_context *pc, struct drm_i915_gem_context_param *args)
{ struct drm_i915_private *i915 = fpriv->i915; int ret = 0;
switch (args->param) { case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: if (args->size)
ret = -EINVAL; elseif (args->value)
pc->user_flags |= BIT(UCONTEXT_NO_ERROR_CAPTURE); else
pc->user_flags &= ~BIT(UCONTEXT_NO_ERROR_CAPTURE); break;
case I915_CONTEXT_PARAM_BANNABLE: if (args->size)
ret = -EINVAL; elseif (!capable(CAP_SYS_ADMIN) && !args->value)
ret = -EPERM; elseif (args->value)
pc->user_flags |= BIT(UCONTEXT_BANNABLE); elseif (pc->uses_protected_content)
ret = -EPERM; else
pc->user_flags &= ~BIT(UCONTEXT_BANNABLE); break;
case I915_CONTEXT_PARAM_LOW_LATENCY: if (intel_uc_uses_guc_submission(&to_gt(i915)->uc))
pc->user_flags |= BIT(UCONTEXT_LOW_LATENCY); else
ret = -EINVAL; break;
case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size)
ret = -EINVAL; elseif (!args->value)
pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE); elseif (pc->uses_protected_content)
ret = -EPERM; else
pc->user_flags |= BIT(UCONTEXT_RECOVERABLE); break;
case I915_CONTEXT_PARAM_PRIORITY:
ret = validate_priority(fpriv->i915, args); if (!ret)
pc->sched.priority = args->value; break;
case I915_CONTEXT_PARAM_SSEU:
ret = set_proto_ctx_sseu(fpriv, pc, args); break;
case I915_CONTEXT_PARAM_VM:
ret = set_proto_ctx_vm(fpriv, pc, args); break;
case I915_CONTEXT_PARAM_ENGINES:
ret = set_proto_ctx_engines(fpriv, pc, args); break;
case I915_CONTEXT_PARAM_PERSISTENCE: if (args->size)
ret = -EINVAL; else
ret = proto_context_set_persistence(fpriv->i915, pc,
args->value); break;
case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
ret = proto_context_set_protected(fpriv->i915, pc,
args->value); break;
case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_RINGSIZE: case I915_CONTEXT_PARAM_CONTEXT_IMAGE: default:
ret = -EINVAL; break;
}
return ret;
}
staticint intel_context_set_gem(struct intel_context *ce, struct i915_gem_context *ctx, struct intel_sseu sseu)
{ int ret = 0;
/* A valid SSEU has no zero fields */ if (sseu.slice_mask && !WARN_ON(ce->engine->class != RENDER_CLASS))
ret = intel_context_reconfigure_sseu(ce, sseu);
if (test_bit(UCONTEXT_LOW_LATENCY, &ctx->user_flags))
__set_bit(CONTEXT_LOW_LATENCY, &ce->flags);
e = alloc_engines(num_engines); if (!e) return ERR_PTR(-ENOMEM);
e->num_engines = num_engines;
for (n = 0; n < num_engines; n++) { struct intel_context *ce, *child; int ret;
switch (pe[n].type) { case I915_GEM_ENGINE_TYPE_PHYSICAL:
ce = intel_context_create(pe[n].engine); break;
case I915_GEM_ENGINE_TYPE_BALANCED:
ce = intel_engine_create_virtual(pe[n].siblings,
pe[n].num_siblings, 0); break;
case I915_GEM_ENGINE_TYPE_PARALLEL:
ce = intel_engine_create_parallel(pe[n].siblings,
pe[n].num_siblings,
pe[n].width); break;
case I915_GEM_ENGINE_TYPE_INVALID: default:
GEM_WARN_ON(pe[n].type != I915_GEM_ENGINE_TYPE_INVALID); continue;
}
if (IS_ERR(ce)) {
err = ERR_CAST(ce); goto free_engines;
}
e->engines[n] = ce;
ret = intel_context_set_gem(ce, ctx, pe->sseu); if (ret) {
err = ERR_PTR(ret); goto free_engines;
}
for_each_child(ce, child) {
ret = intel_context_set_gem(child, ctx, pe->sseu); if (ret) {
err = ERR_PTR(ret); goto free_engines;
}
}
/* * XXX: Must be done after calling intel_context_set_gem as that * function changes the ring size. The ring is allocated when * the context is pinned. If the ring size is changed after * allocation we have a mismatch of the ring size and will cause * the context to hang. Presumably with a bit of reordering we * could move the perma-pin step to the backend function * intel_engine_create_parallel.
*/ if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
ret = perma_pin_contexts(ce); if (ret) {
err = ERR_PTR(ret); goto free_engines;
}
}
}
staticbool __cancel_engine(struct intel_engine_cs *engine)
{ /* * Send a "high priority pulse" down the engine to cause the * current request to be momentarily preempted. (If it fails to * be preempted, it will be reset). As we have marked our context * as banned, any incomplete request, including any running, will * be skipped following the preemption. * * If there is no hangchecking (one of the reasons why we try to * cancel the context) and no forced preemption, there may be no * means by which we reset the GPU and evict the persistent hog. * Ergo if we are unable to inject a preemptive pulse that can * kill the banned context, we fallback to doing a local reset * instead.
*/ return intel_engine_pulse(engine) == 0;
}
if (intel_context_has_inflight(ce)) return intel_context_inflight(ce);
if (!ce->timeline) return NULL;
/* * rq->link is only SLAB_TYPESAFE_BY_RCU, we need to hold a reference * to the request to prevent it being transferred to a new timeline * (and onto a new timeline->requests list).
*/
rcu_read_lock();
list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { bool found;
/* timeline is already completed upto this point? */ if (!i915_request_get_rcu(rq)) break;
/* Check with the backend if the request is inflight */
found = true; if (likely(rcu_access_pointer(rq->timeline) == ce->timeline))
found = i915_request_active_engine(rq, &engine);
i915_request_put(rq); if (found) break;
}
rcu_read_unlock();
/* * Map the user's engine back to the actual engines; one virtual * engine will be mapped to multiple engines, and using ctx->engine[] * the same engine may be have multiple instances in the user's map. * However, we only care about pending requests, so only include * engines on which there are incomplete requests.
*/
for_each_gem_engine(ce, engines, it) { struct intel_engine_cs *engine;
/* * Check the current active state of this context; if we * are currently executing on the GPU we need to evict * ourselves. On the other hand, if we haven't yet been * submitted to the GPU or if everything is complete, * we have nothing to do.
*/
engine = active_engine(ce);
/* First attempt to gracefully cancel the context */ if (engine && !__cancel_engine(engine) && (exit || !persistent)) /* * If we are unable to send a preemptive pulse to bump * the context from the GPU, we have to resort to a full * reset. We hope the collateral damage is worth it.
*/
__reset_context(engines->ctx, engine);
}
}
/* serialises with execbuf */
intel_context_close(ce); if (!intel_context_pin_if_active(ce)) continue;
/* Wait until context is finally scheduled out and retired */
err = i915_sw_fence_await_active(&engines->fence,
&ce->active,
I915_ACTIVE_AWAIT_BARRIER);
intel_context_unpin(ce); if (err) goto kill;
}
spin_lock_irq(&ctx->stale.lock); if (!i915_gem_context_is_closed(ctx))
list_add_tail(&engines->link, &ctx->stale.engines);
spin_unlock_irq(&ctx->stale.lock);
/* * The LUT uses the VMA as a backpointer to unref the object, * so we need to clear the LUT before we close all the VMA (inside * the ppgtt).
*/
lut_close(ctx);
ctx->file_priv = ERR_PTR(-EBADF);
client = ctx->client; if (client) {
spin_lock(&client->ctx_lock);
list_del_rcu(&ctx->client_link);
spin_unlock(&client->ctx_lock);
}
mutex_unlock(&ctx->mutex);
/* * If the user has disabled hangchecking, we can not be sure that * the batches will ever complete after the context is closed, * keeping the context and all resources pinned forever. So in this * case we opt to forcibly kill off all remaining requests on * context close.
*/
kill_context(ctx);
if (state) { /* * Only contexts that are short-lived [that will expire or be * reset] are allowed to survive past termination. We require * hangcheck to ensure that the persistent requests are healthy.
*/ if (!ctx->i915->params.enable_hangcheck) return -EINVAL;
i915_gem_context_set_persistence(ctx);
} else { /* To cancel a context we use "preempt-to-idle" */ if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) return -ENODEV;
/* * If the cancel fails, we then need to reset, cleanly! * * If the per-engine reset fails, all hope is lost! We resort * to a full GPU reset in that unlikely case, but realistically * if the engine could not reset, the full reset does not fare * much better. The damage has been done. * * However, if we cannot reset an engine by itself, we cannot * cleanup a hanging persistent context without causing * collateral damage, and we should not pretend we can by * exposing the interface.
*/ if (!intel_has_reset_engine(to_gt(ctx->i915))) return -ENODEV;
/* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists. If there
* is no remap info, it will be a NOP. */
ctx->remap_slice = ALL_L3_SLICES(i915);
for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
if (pc->single_timeline) {
err = drm_syncobj_create(&ctx->syncobj,
DRM_SYNCOBJ_CREATE_SIGNALED,
NULL); if (err) goto err_engines;
}
if (pc->uses_protected_content) {
ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
ctx->uses_protected_content = true;
}
if (!i915_gem_context_has_full_ppgtt(ctx)) return -ENODEV;
vm = ctx->vm;
GEM_BUG_ON(!vm);
/* * Get a reference for the allocated handle. Once the handle is * visible in the vm_xa table, userspace could try to close it * from under our feet, so we need to hold the extra reference * first.
*/
i915_vm_get(vm);
/* No zeros in any field. */ if (!user->slice_mask || !user->subslice_mask ||
!user->min_eus_per_subslice || !user->max_eus_per_subslice) return -EINVAL;
/* Max > min. */ if (user->max_eus_per_subslice < user->min_eus_per_subslice) return -EINVAL;
/* * Some future proofing on the types since the uAPI is wider than the * current internal implementation.
*/ if (overflows_type(user->slice_mask, context->slice_mask) ||
overflows_type(user->subslice_mask, context->subslice_mask) ||
overflows_type(user->min_eus_per_subslice,
context->min_eus_per_subslice) ||
overflows_type(user->max_eus_per_subslice,
context->max_eus_per_subslice)) return -EINVAL;
/* Check validity against hardware. */ if (user->slice_mask & ~device->slice_mask) return -EINVAL;
if (user->subslice_mask & ~dev_subslice_mask) return -EINVAL;
if (user->max_eus_per_subslice > device->max_eus_per_subslice) return -EINVAL;
/* Part specific restrictions. */ if (GRAPHICS_VER(i915) == 11) { unsignedint hw_s = hweight8(device->slice_mask); unsignedint hw_ss_per_s = hweight8(dev_subslice_mask); unsignedint req_s = hweight8(context->slice_mask); unsignedint req_ss = hweight8(context->subslice_mask);
/* * Only full subslice enablement is possible if more than one * slice is turned on.
*/ if (req_s > 1 && req_ss != hw_ss_per_s) return -EINVAL;
/* * If more than four (SScount bitfield limit) subslices are * requested then the number has to be even.
*/ if (req_ss > 4 && (req_ss & 1)) return -EINVAL;
/* * If only one slice is enabled and subslice count is below the * device full enablement, it must be at most half of the all * available subslices.
*/ if (req_s == 1 && req_ss < hw_ss_per_s &&
req_ss > (hw_ss_per_s / 2)) return -EINVAL;
/* ABI restriction - VME use case only. */
/* All slices or one slice only. */ if (req_s != 1 && req_s != hw_s) return -EINVAL;
/* * Half subslices or full enablement only when one slice is * enabled.
*/ if (req_s == 1 &&
(req_ss != hw_ss_per_s && req_ss != (hw_ss_per_s / 2))) return -EINVAL;
/* No EU configuration changes. */ if ((user->min_eus_per_subslice !=
device->max_eus_per_subslice) ||
(user->max_eus_per_subslice !=
device->max_eus_per_subslice)) return -EINVAL;
}
if (!IS_ENABLED(CONFIG_DRM_I915_REPLAY_GPU_HANGS_API)) return -EINVAL;
if (!ctx->i915->params.enable_debug_only_api) return -EINVAL;
if (args->size < sizeof(user)) return -EINVAL;
if (copy_from_user(&user, u64_to_user_ptr(args->value), sizeof(user))) return -EFAULT;
if (user.mbz) return -EINVAL;
if (user.flags & ~(I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX)) return -EINVAL;
lookup = 0; if (user.flags & I915_CONTEXT_IMAGE_FLAG_ENGINE_INDEX)
lookup |= LOOKUP_USER_INDEX;
ce = lookup_user_engine(ctx, lookup, &user.engine); if (IS_ERR(ce)) return PTR_ERR(ce);
if (user.size < ce->engine->context_size) {
ret = -EINVAL; goto out_ce;
}
if (drm_WARN_ON_ONCE(&ctx->i915->drm,
test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) { /* * This is racy but for a debug only API, if userspace is keen * to create and configure contexts, while simultaneously using * them from a second thread, let them suffer by potentially not * executing with the context image they just raced to apply.
*/
ret = -EBUSY; goto out_ce;
}
state = kmalloc(ce->engine->context_size, GFP_KERNEL); if (!state) {
ret = -ENOMEM; goto out_ce;
}
if (copy_from_user(state, u64_to_user_ptr(user.image),
ce->engine->context_size)) {
ret = -EFAULT; goto out_state;
}
shmem_state = shmem_create_from_data(ce->engine->name,
state, ce->engine->context_size); if (IS_ERR(shmem_state)) {
ret = PTR_ERR(shmem_state); goto out_state;
}
if (intel_context_set_own_state(ce)) {
ret = -EBUSY;
fput(shmem_state); goto out_state;
}
staticint ctx_setparam(struct drm_i915_file_private *fpriv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args)
{ int ret = 0;
switch (args->param) { case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: if (args->size)
ret = -EINVAL; elseif (args->value)
i915_gem_context_set_no_error_capture(ctx); else
i915_gem_context_clear_no_error_capture(ctx); break;
case I915_CONTEXT_PARAM_BANNABLE: if (args->size)
ret = -EINVAL; elseif (!capable(CAP_SYS_ADMIN) && !args->value)
ret = -EPERM; elseif (args->value)
i915_gem_context_set_bannable(ctx); elseif (i915_gem_context_uses_protected_content(ctx))
ret = -EPERM; /* can't clear this for protected contexts */ else
i915_gem_context_clear_bannable(ctx); break;
case I915_CONTEXT_PARAM_RECOVERABLE: if (args->size)
ret = -EINVAL; elseif (!args->value)
i915_gem_context_clear_recoverable(ctx); elseif (i915_gem_context_uses_protected_content(ctx))
ret = -EPERM; /* can't set this for protected contexts */ else
i915_gem_context_set_recoverable(ctx); break;
case I915_CONTEXT_PARAM_PRIORITY:
ret = set_priority(ctx, args); break;
case I915_CONTEXT_PARAM_SSEU:
ret = set_sseu(ctx, args); break;
case I915_CONTEXT_PARAM_PERSISTENCE:
ret = set_persistence(ctx, args); break;
case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
ret = set_context_image(ctx, args); break;
case I915_CONTEXT_PARAM_PROTECTED_CONTENT: case I915_CONTEXT_PARAM_NO_ZEROMAP: case I915_CONTEXT_PARAM_BAN_PERIOD: case I915_CONTEXT_PARAM_RINGSIZE: case I915_CONTEXT_PARAM_VM: case I915_CONTEXT_PARAM_ENGINES: default:
ret = -EINVAL; break;
}
ctx = i915_gem_create_context(file_priv->i915, pc); if (IS_ERR(ctx)) return ctx;
/* * One for the xarray and one for the caller. We need to grab * the reference *prior* to making the ctx visible to userspace * in gem_context_register(), as at any point after that * userspace can try to race us with another thread destroying * the context under our feet.
*/
i915_gem_context_get(ctx);
gem_context_register(ctx, file_priv, id);
old = xa_erase(&file_priv->proto_context_xa, id);
GEM_BUG_ON(old != pc);
proto_context_close(file_priv->i915, pc);
ctx = __context_lookup(file_priv, id); if (ctx) return ctx;
mutex_lock(&file_priv->proto_context_lock); /* Try one more time under the lock */
ctx = __context_lookup(file_priv, id); if (!ctx) {
pc = xa_load(&file_priv->proto_context_xa, id); if (!pc)
ctx = ERR_PTR(-ENOENT); else
ctx = finalize_create_context_locked(file_priv, pc, id);
}
mutex_unlock(&file_priv->proto_context_lock);
/* We need to hold the proto-context lock here to prevent races * with finalize_create_context_locked().
*/
mutex_lock(&file_priv->proto_context_lock);
ctx = xa_erase(&file_priv->context_xa, args->ctx_id);
pc = xa_erase(&file_priv->proto_context_xa, args->ctx_id);
mutex_unlock(&file_priv->proto_context_lock);
if (!ctx && !pc) return -ENOENT;
GEM_WARN_ON(ctx && pc);
if (copy_to_user(u64_to_user_ptr(args->value), &user_sseu, sizeof(user_sseu))) return -EFAULT;
out:
args->size = sizeof(user_sseu);
return 0;
}
/*
 * i915_gem_context_getparam_ioctl - DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM
 * @dev: drm device
 * @data: pointer to a struct drm_i915_gem_context_param from userspace
 * @file: drm file the context belongs to
 *
 * Reads a single parameter of an existing context back to userspace.
 * For scalar params args->size is reported as 0 and the value is placed
 * in args->value; SSEU/VM/protected-content params delegate to their
 * dedicated getters. Returns 0 or a negative error code.
 */
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_context_param *args = data;
	struct i915_gem_context *ctx;
	struct i915_address_space *vm;
	int ret = 0;

	ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	switch (args->param) {
	case I915_CONTEXT_PARAM_GTT_SIZE:
		args->size = 0;
		/* Report the size of the VM execbuf will run under */
		vm = i915_gem_context_get_eb_vm(ctx);
		args->value = vm->total;
		i915_vm_put(vm);
		break;

	case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
		args->size = 0;
		args->value = i915_gem_context_no_error_capture(ctx);
		break;

	case I915_CONTEXT_PARAM_BANNABLE:
		args->size = 0;
		args->value = i915_gem_context_is_bannable(ctx);
		break;

	case I915_CONTEXT_PARAM_RECOVERABLE:
		args->size = 0;
		args->value = i915_gem_context_is_recoverable(ctx);
		break;

	case I915_CONTEXT_PARAM_PRIORITY:
		args->size = 0;
		args->value = ctx->sched.priority;
		break;

	case I915_CONTEXT_PARAM_SSEU:
		ret = get_sseu(ctx, args);
		break;

	case I915_CONTEXT_PARAM_VM:
		ret = get_ppgtt(file_priv, ctx, args);
		break;

	case I915_CONTEXT_PARAM_PERSISTENCE:
		args->size = 0;
		args->value = i915_gem_context_is_persistent(ctx);
		break;

	case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
		ret = get_protected(ctx, args);
		break;

	case I915_CONTEXT_PARAM_NO_ZEROMAP:
	case I915_CONTEXT_PARAM_BAN_PERIOD:
	case I915_CONTEXT_PARAM_ENGINES:
	case I915_CONTEXT_PARAM_RINGSIZE:
	case I915_CONTEXT_PARAM_CONTEXT_IMAGE:
	default:
		ret = -EINVAL;
		break;
	}

	i915_gem_context_put(ctx);
	return ret;
}
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{ struct drm_i915_file_private *file_priv = file->driver_priv; struct drm_i915_gem_context_param *args = data; struct i915_gem_proto_context *pc; struct i915_gem_context *ctx; int ret = 0;
mutex_lock(&file_priv->proto_context_lock);
ctx = __context_lookup(file_priv, args->ctx_id); if (!ctx) {
pc = xa_load(&file_priv->proto_context_xa, args->ctx_id); if (pc) { /* Contexts should be finalized inside
--> --------------------
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.