/**
 * DOC: PXP
 *
 * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
 * It allows execution and flip to display of protected (i.e. encrypted)
 * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
 *
 * Objects can opt-in to PXP encryption at creation time via the
 * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
 * correctly protected they must be used in conjunction with a context created
 * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
 * of those two uapi flags for details and restrictions.
 *
 * Protected objects are tied to a pxp session; currently we only support one
 * session, which i915 manages and whose index is available in the uapi
 * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
 * protected objects.
 * The session is invalidated by the HW when certain events occur (e.g.
 * suspend/resume). When this happens, all the objects that were used with the
 * session are marked as invalid and all contexts marked as using protected
 * content are banned. Any further attempt at using them in an execbuf call is
 * rejected, while flips are converted to black frames.
 *
 * Some of the PXP setup operations are performed by the Management Engine,
 * which is handled by the mei driver; communication between i915 and mei is
 * performed via the mei_pxp component module.
 */
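/*
 * Example (userspace side, illustrative): creating a protected object and a
 * protected context with the two uapi flags described above. This mirrors
 * the uapi kerneldoc in include/uapi/drm/i915_drm.h; error handling is
 * elided and the snippet is a sketch, not a complete program. Note that a
 * protected context must also be marked as not recoverable.
 *
 *	struct drm_i915_gem_create_ext_protected_content protected_ext = {
 *		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
 *	};
 *	struct drm_i915_gem_create_ext create_ext = {
 *		.size = PAGE_SIZE,
 *		.extensions = (uintptr_t)&protected_ext,
 *	};
 *
 *	int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
 *
 *	struct drm_i915_gem_context_create_ext_setparam p_protected = {
 *		.base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
 *			.value = 1,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
 *		.base = {
 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
 *			.next_extension = (uintptr_t)&p_protected,
 *		},
 *		.param = {
 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
 *			.value = 0,
 *		},
 *	};
 *	struct drm_i915_gem_context_create_ext create = {
 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
 *		.extensions = (uintptr_t)&p_norecover,
 *	};
 *
 *	err = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create);
 */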
static int create_vcs_context(struct intel_pxp *pxp)
{
	static struct lock_class_key pxp_lock;
	struct intel_gt *gt = pxp->ctrl_gt;
	struct intel_engine_cs *engine;
	struct intel_context *ce;
	int i;

	/*
	 * Find the first VCS engine present. We're guaranteed there is one
	 * if we're in this function due to the check in has_pxp
	 */
	for (i = 0, engine = NULL; !engine; i++)
		engine = gt->engine_class[VIDEO_DECODE_CLASS][i];

	ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
						I915_GEM_HWS_PXP_ADDR,
						&pxp_lock, "pxp_context");
	if (IS_ERR(ce)) {
		drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
		return PTR_ERR(ce);
	}

	pxp->ce = ce;

	return 0;
}
static void pxp_init_full(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;
	int ret;

	/*
	 * we'll use the completion to check if there is a termination pending,
	 * so we start it as completed and we reinit it when a termination
	 * is triggered.
	 */
init_completion(&pxp->termination);
complete_all(&pxp->termination);
	intel_pxp_session_management_init(pxp);

	ret = create_vcs_context(pxp);
	if (ret)
		return;

	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		ret = intel_pxp_gsccs_init(pxp);
	else
		ret = intel_pxp_tee_component_init(pxp);
	if (ret)
		goto out_context;
	drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");
return;
out_context:
destroy_vcs_context(pxp);
}
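/*
 * Illustrative sketch of the completion pattern set up above: &pxp->termination
 * starts off completed ("no termination pending"), is re-armed with
 * reinit_completion() when a termination is triggered, and is completed again
 * once the termination flow finishes, releasing any waiters. The helper name
 * below is hypothetical; only the completion API usage is the point.
 *
 *	static int wait_for_pending_termination(struct intel_pxp *pxp,
 *						int timeout_ms)
 *	{
 *		if (!wait_for_completion_timeout(&pxp->termination,
 *						 msecs_to_jiffies(timeout_ms)))
 *			return -ETIMEDOUT;
 *		return 0;
 *	}
 *
 * The termination flow pairs the re-arm with the release:
 *
 *	reinit_completion(&pxp->termination);	// termination triggered
 *	...					// teardown runs
 *	complete_all(&pxp->termination);	// waiters released
 */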
static struct intel_gt *find_gt_for_required_teelink(struct drm_i915_private *i915)
{
	/*
	 * NOTE: Only certain platforms require PXP-tee-backend dependencies
	 * for HuC authentication. For now, it's limited to DG2.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && IS_ENABLED(CONFIG_INTEL_MEI_GSC) &&
	    intel_huc_is_loaded_by_gsc(&to_gt(i915)->uc.huc) &&
	    intel_uc_uses_huc(&to_gt(i915)->uc))
		return to_gt(i915);
return NULL;
}
static struct intel_gt *find_gt_for_required_protected_content(struct drm_i915_private *i915)
{
	if (!HAS_PXP(i915))
		return NULL;

	/*
	 * For MTL onwards, PXP-controller-GT needs to have a valid GSC engine
	 * on the media GT. NOTE: if we have a media-tile with a GSC-engine,
	 * the VDBOX is already present so skip that check. We also have to
	 * ensure the GSC and HuC firmware are coming online.
	 */
	if (i915->media_gt && HAS_ENGINE(i915->media_gt, GSC0) &&
intel_uc_fw_is_loadable(&i915->media_gt->uc.gsc.fw) &&
	    intel_uc_fw_is_loadable(&i915->media_gt->uc.huc.fw))
		return i915->media_gt;
	/*
	 * Else we rely on the mei-pxp module, but only on legacy platforms
	 * that predate separate media GTs, provided a valid VDBOX is present.
	 */
	if (IS_ENABLED(CONFIG_INTEL_MEI_PXP) && !i915->media_gt && VDBOX_MASK(to_gt(i915)))
		return to_gt(i915);

	return NULL;
}
int intel_pxp_init(struct drm_i915_private *i915)
{
	struct intel_gt *gt;
	bool is_full_feature = false;

	if (intel_gt_is_wedged(to_gt(i915)))
		return -ENOTCONN;
	/*
	 * NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
	 * we still need it if PXP's backend tee transport is needed.
	 */
	gt = find_gt_for_required_protected_content(i915);
	if (gt)
		is_full_feature = true;
	else
		gt = find_gt_for_required_teelink(i915);
	if (!gt)
		return -ENODEV;
	/*
	 * At this point, we will either enable full featured PXP capabilities
	 * including session and object management, or we will init the backend tee
	 * channel for internal users such as HuC loading by GSC.
	 */
	i915->pxp = kzalloc(sizeof(*i915->pxp), GFP_KERNEL);
	if (!i915->pxp)
		return -ENOMEM;
	/* init common info used by all feature-mode usages */
i915->pxp->ctrl_gt = gt;
mutex_init(&i915->pxp->tee_mutex);
	/*
	 * If full PXP feature is not available but HuC is loaded by GSC on pre-MTL
	 * such as DG2, we can skip the init of the full PXP session/object management
	 * and just init the tee channel.
	 */
	if (is_full_feature)
		pxp_init_full(i915->pxp);
	else
		intel_pxp_tee_component_init(i915->pxp);
return 0;
}
void intel_pxp_fini(struct drm_i915_private *i915)
{
	if (!i915->pxp)
		return;
i915->pxp->arb_is_valid = false;
	if (HAS_ENGINE(i915->pxp->ctrl_gt, GSC0))
		intel_pxp_gsccs_fini(i915->pxp);
	else
		intel_pxp_tee_component_fini(i915->pxp);

	destroy_vcs_context(i915->pxp);

	kfree(i915->pxp);
	i915->pxp = NULL;
}
static void pxp_queue_termination(struct intel_pxp *pxp)
{
	struct intel_gt *gt = pxp->ctrl_gt;

	/*
	 * We want to get the same effect as if we received a termination
	 * interrupt, so just pretend that we did.
	 */
spin_lock_irq(gt->irq_lock);
intel_pxp_mark_termination_in_progress(pxp);
pxp->session_events |= PXP_TERMINATION_REQUEST;
queue_work(system_unbound_wq, &pxp->session_work);
spin_unlock_irq(gt->irq_lock);
}
static bool pxp_component_bound(struct intel_pxp *pxp)
{
	bool bound = false;

	mutex_lock(&pxp->tee_mutex);
	if (pxp->pxp_component)
		bound = true;
mutex_unlock(&pxp->tee_mutex);
return bound;
}
int intel_pxp_get_backend_timeout_ms(struct intel_pxp *pxp)
{
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		return GSCFW_MAX_ROUND_TRIP_LATENCY_MS;
	else
		return 250;
}
static int __pxp_global_teardown_final(struct intel_pxp *pxp)
{
	int timeout;

	if (!pxp->arb_is_valid)
		return 0;

	drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini");

	/*
	 * To ensure synchronous and coherent session teardown completion
	 * in response to suspend or shutdown triggers, don't use a worker.
	 */
intel_pxp_mark_termination_in_progress(pxp);
intel_pxp_terminate(pxp, false);
timeout = intel_pxp_get_backend_timeout_ms(pxp);
	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
		return -ETIMEDOUT;
return 0;
}
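/*
 * Hedged sketch of a suspend/fini-time caller of the helper above, loosely
 * modeled on the driver's real entry points (which live in other PXP files):
 * the synchronous teardown is done under the arb_mutex with a runtime-pm
 * wakeref held. Treat names and ordering as illustrative.
 *
 *	void pxp_end_example(struct intel_pxp *pxp)
 *	{
 *		struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
 *		intel_wakeref_t wakeref;
 *
 *		if (!intel_pxp_is_enabled(pxp))
 *			return;
 *
 *		wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *		mutex_lock(&pxp->arb_mutex);
 *
 *		if (__pxp_global_teardown_final(pxp))
 *			drm_dbg(&i915->drm, "PXP end timed out\n");
 *
 *		mutex_unlock(&pxp->arb_mutex);
 *		intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 *	}
 */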
static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
{
	int timeout;

	if (pxp->arb_is_valid)
		return 0;

	drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart");

	/*
	 * The arb-session is currently inactive and we are doing a reset and restart
	 * due to a runtime event. Use the worker that was designed for this.
	 */
pxp_queue_termination(pxp);
timeout = intel_pxp_get_backend_timeout_ms(pxp);
	if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) {
		drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)",
			timeout);
		return -ETIMEDOUT;
	}

	return 0;
}
static bool pxp_required_fw_failed(struct intel_pxp *pxp)
{
	if (__intel_uc_fw_status(&pxp->ctrl_gt->uc.huc.fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return true;

	if (HAS_ENGINE(pxp->ctrl_gt, GSC0) &&
	    __intel_uc_fw_status(&pxp->ctrl_gt->uc.gsc.fw) == INTEL_UC_FIRMWARE_LOAD_FAIL)
		return true;

	return false;
}
static bool pxp_fw_dependencies_completed(struct intel_pxp *pxp)
{
	if (HAS_ENGINE(pxp->ctrl_gt, GSC0))
		return intel_pxp_gsccs_is_ready_for_sessions(pxp);

	return pxp_component_bound(pxp);
}
/*
 * this helper is used by both intel_pxp_start and by
 * the GET_PARAM IOCTL that user space calls. Thus, the
 * return values here should match the UAPI spec.
 */
int intel_pxp_get_readiness_status(struct intel_pxp *pxp, int timeout_ms)
{
	if (!intel_pxp_is_enabled(pxp))
		return -ENODEV;

	if (pxp_required_fw_failed(pxp))
		return -ENODEV;

	if (pxp->platform_cfg_is_bad)
		return -ENODEV;

	if (timeout_ms) {
		if (wait_for(pxp_fw_dependencies_completed(pxp), timeout_ms))
			return 2;
	} else if (!pxp_fw_dependencies_completed(pxp)) {
		return 2;
	}

	return 1;
}
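/*
 * Userspace counterpart (illustrative): the readiness value computed above is
 * what the I915_PARAM_PXP_STATUS GET_PARAM reports, so an application can
 * poll it before creating protected contexts. A minimal sketch, assuming a
 * kernel that exposes I915_PARAM_PXP_STATUS:
 *
 *	int pxp_status = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_PXP_STATUS,
 *		.value = &pxp_status,
 *	};
 *
 *	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0) {
 *		if (pxp_status == 1)
 *			;	// PXP ready
 *		else if (pxp_status == 2)
 *			;	// not ready yet, retry later
 *	} else {
 *		// errno == ENODEV: PXP not supported on this device/config
 *	}
 */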
/*
 * the arb session is restarted from the irq work when we receive the
 * termination completion interrupt
 */
#define PXP_READINESS_TIMEOUT 250
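/*
 * Hedged sketch of the interrupt-side counterpart referenced above: when the
 * HW raises the termination-complete interrupt, the handler latches an event
 * bit under gt->irq_lock and kicks the session worker, which restarts the arb
 * session and signals &pxp->termination. In the real driver this is split
 * across intel_pxp_irq.c and intel_pxp_session.c; names are simplified here.
 *
 *	// irq handler, called under gt->irq_lock
 *	if (iir & TERMINATION_COMPLETE_IRQ) {		// simplified bit name
 *		pxp->session_events |= PXP_TERMINATION_COMPLETE;
 *		queue_work(system_unbound_wq, &pxp->session_work);
 *	}
 *
 *	// session worker
 *	if (events & PXP_TERMINATION_COMPLETE) {
 *		pxp_create_arb_session(pxp);	// restart the default session
 *		complete_all(&pxp->termination);
 *	}
 */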
int intel_pxp_start(struct intel_pxp *pxp)
{
	int ret = 0;
	ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT);
	if (ret < 0) {
		drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret);
		return ret;
	} else if (ret > 1) {
		return -EIO; /* per UAPI spec, user may retry later */
	}
mutex_lock(&pxp->arb_mutex);
	ret = __pxp_global_teardown_restart(pxp);
	if (ret)
		goto unlock;
/* make sure the compiler doesn't optimize the double access */
	barrier();

	if (!pxp->arb_is_valid)
		ret = -EIO;

unlock:
	mutex_unlock(&pxp->arb_mutex);
	return ret;
}
int intel_pxp_key_check(struct intel_pxp *pxp,
			struct drm_i915_gem_object *obj,
			bool assign)
{
	if (!intel_pxp_is_active(pxp))
		return -ENODEV;

	if (!i915_gem_object_is_protected(obj))
		return -EINVAL;
GEM_BUG_ON(!pxp->key_instance);
	/*
	 * If this is the first time we're using this object, it's not
	 * encrypted yet; it will be encrypted with the current key, so mark it
	 * as such. If the object is already encrypted, check instead if the
	 * used key is still valid.
	 */
	if (!obj->pxp_key_instance && assign)
		obj->pxp_key_instance = pxp->key_instance;
	if (obj->pxp_key_instance != pxp->key_instance)
		return -ENOEXEC;

	return 0;
}
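/*
 * Illustrative callers (they live outside this file): the execbuf path both
 * checks and assigns the key instance on first use (assign = true), while
 * scanout validation only checks (assign = false) so a stale-key framebuffer
 * is flipped as a black frame instead. Exact call sites vary by kernel
 * version; this is a sketch of the intent.
 *
 *	// execbuf: object gets tied to the current key if not yet encrypted
 *	if (i915_gem_context_uses_protected_content(ctx) &&
 *	    i915_gem_object_is_protected(obj))
 *		err = intel_pxp_key_check(pxp, obj, true);
 *
 *	// flip: a stale key means the fb contents are unusable
 *	if (i915_gem_object_is_protected(obj) &&
 *	    intel_pxp_key_check(pxp, obj, false) != 0)
 *		fb_is_valid = false;
 */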
void intel_pxp_invalidate(struct intel_pxp *pxp)
{
	struct drm_i915_private *i915 = pxp->ctrl_gt->i915;
	struct i915_gem_context *ctx, *cn;

	/* ban all contexts marked as protected */
spin_lock_irq(&i915->gem.contexts.lock);
	list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;
		if (!kref_get_unless_zero(&ctx->ref))
			continue;
if (likely(!i915_gem_context_uses_protected_content(ctx))) {
			i915_gem_context_put(ctx);
			continue;
}
spin_unlock_irq(&i915->gem.contexts.lock);
		/*
		 * By the time we get here we are either going to suspend with
		 * quiesced execution or the HW keys are already long gone and
		 * in this case it is worthless to attempt to close the context
		 * and wait for its execution. It will hang the GPU if it has
		 * not already. So, as a fast mitigation, we can ban the
		 * context as quick as we can. That might race with the
		 * execbuffer, but currently this is the best that can be done.
		 */
for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
intel_context_ban(ce, NULL);
i915_gem_context_unlock_engines(ctx);
		/*
		 * The context has been banned, no need to keep the wakeref.
		 * This is safe from races because the only other place this
		 * is touched is context_release and we're holding a ctx ref.
		 */
		if (ctx->pxp_wakeref) {
intel_runtime_pm_put(&i915->runtime_pm,
ctx->pxp_wakeref);
			ctx->pxp_wakeref = 0;
		}

		spin_lock_irq(&i915->gem.contexts.lock);
		list_safe_reset_next(ctx, cn, link);
		i915_gem_context_put(ctx);
	}
	spin_unlock_irq(&i915->gem.contexts.lock);
}