/** * DOC: GuC-based command submission * * The Scratch registers: * There are 16 MMIO-based registers start from 0xC180. The kernel driver writes * a value to the action register (SOFT_SCRATCH_0) along with any data. It then * triggers an interrupt on the GuC via another register write (0xC4C8). * Firmware writes a success/fail code back to the action register after * processes the request. The kernel driver polls waiting for this update and * then proceeds. * * Command Transport buffers (CTBs): * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host * - G2H) are a message interface between the i915 and GuC. * * Context registration: * Before a context can be submitted it must be registered with the GuC via a * H2G. A unique guc_id is associated with each context. The context is either * registered at request creation time (normal operation) or at submission time * (abnormal operation, e.g. after a reset). * * Context submission: * The i915 updates the LRC tail value in memory. The i915 must enable the * scheduling of the context within the GuC for the GuC to actually consider it. * Therefore, the first time a disabled context is submitted we use a schedule * enable H2G, while follow up submissions are done via the context submit H2G, * which informs the GuC that a previously enabled context has new work * available. * * Context unpin: * To unpin a context a H2G is used to disable scheduling. When the * corresponding G2H returns indicating the scheduling disable operation has * completed it is safe to unpin the context. While a disable is in flight it * isn't safe to resubmit the context so a fence is used to stall all future * requests of that context until the G2H is returned. Because this interaction * with the GuC takes a non-zero amount of time we delay the disabling of * scheduling after the pin count goes to zero by a configurable period of time * (see SCHED_DISABLE_DELAY_MS). 
The thought is this gives the user a window of * time to resubmit something on the context before doing this costly operation. * This delay is only done if the context isn't closed and the guc_id usage is * less than a threshold (see NUM_SCHED_DISABLE_GUC_IDS_THRESHOLD). * * Context deregistration: * Before a context can be destroyed or if we steal its guc_id we must * deregister the context with the GuC via H2G. If stealing the guc_id it isn't * safe to submit anything to this guc_id until the deregister completes so a * fence is used to stall all requests associated with this guc_id until the * corresponding G2H returns indicating the guc_id has been deregistered. * * submission_state.guc_ids: * Unique number associated with private GuC context data passed in during * context registration / submission / deregistration. 64k available. Simple ida * is used for allocation. * * Stealing guc_ids: * If no guc_ids are available they can be stolen from another context at * request creation time if that context is unpinned. If a guc_id can't be found * we punt this problem to the user as we believe this is near impossible to hit * during normal use cases. * * Locking: * In the GuC submission code we have 3 basic spin locks which protect * everything. Details about each below. * * sched_engine->lock * This is the submission lock for all contexts that share an i915 schedule * engine (sched_engine), thus only one of the contexts which share a * sched_engine can be submitting at a time. Currently only one sched_engine is * used for all of GuC submission but that could change in the future. * * guc->submission_state.lock * Global lock for GuC submission state. Protects guc_ids and destroyed contexts * list. * * ce->guc_state.lock * Protects everything under ce->guc_state. Ensures that a context is in the * correct state before issuing a H2G. e.g. 
We don't issue a schedule disable * on a disabled context (bad idea), we don't issue a schedule enable when a * schedule disable is in flight, etc... Also protects list of inflight requests * on the context and the priority management state. Lock is individual to each * context. * * Lock ordering rules: * sched_engine->lock -> ce->guc_state.lock * guc->submission_state.lock -> ce->guc_state.lock * * Reset races: * When a full GT reset is triggered it is assumed that some G2H responses to * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be * fatal as we do certain operations upon receiving a G2H (e.g. destroy * contexts, release guc_ids, etc...). When this occurs we can scrub the * context state and cleanup appropriately, however this is quite racey. * To avoid races, the reset code must disable submission before scrubbing for * the missing G2H, while the submission code must check for submission being * disabled and skip sending H2Gs and updating context states when it is. Both * sides must also make sure to hold the relevant locks.
*/
/* * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous * per the GuC submission interface. A different allocation algorithm is used * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to * partition the guc_id space. We believe the number of multi-lrc contexts in * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for * multi-lrc.
*/ #define NUMBER_MULTI_LRC_GUC_ID(guc) \
((guc)->submission_state.num_guc_ids / 16)
/* * Below is a set of functions which control the GuC scheduling state which * require a lock.
*/ #define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0) #define SCHED_STATE_DESTROYED BIT(1) #define SCHED_STATE_PENDING_DISABLE BIT(2) #define SCHED_STATE_BANNED BIT(3) #define SCHED_STATE_ENABLED BIT(4) #define SCHED_STATE_PENDING_ENABLE BIT(5) #define SCHED_STATE_REGISTERED BIT(6) #define SCHED_STATE_POLICY_REQUIRED BIT(7) #define SCHED_STATE_CLOSED BIT(8) #define SCHED_STATE_BLOCKED_SHIFT 9 #define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT) #define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
/* * Kernel contexts can have SCHED_STATE_REGISTERED after suspend. * A context close can race with the submission path, so SCHED_STATE_CLOSED * can be set immediately before we try to register.
*/ #define SCHED_STATE_VALID_INIT \
(SCHED_STATE_BLOCKED_MASK | \
SCHED_STATE_CLOSED | \
SCHED_STATE_REGISTERED)
/* * When using multi-lrc submission a scratch memory area is reserved in the * parent's context state for the process descriptor, work queue, and handshake * between the parent + children contexts to insert safe preemption points * between each of the BBs. Currently the scratch area is sized to a page. * * The layout of this scratch area is below: * 0 guc_process_desc * + sizeof(struct guc_process_desc) child go * + CACHELINE_BYTES child join[0] * ... * + CACHELINE_BYTES child join[n - 1] * ... unused * PARENT_SCRATCH_SIZE / 2 work queue start * ... work queue * PARENT_SCRATCH_SIZE - 1 work queue end
*/ #define WQ_SIZE (PARENT_SCRATCH_SIZE / 2) #define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE)
/* * Need to subtract LRC_STATE_OFFSET here as the * parallel.guc.parent_page is the offset into ce->state while * ce->lrc_reg_reg is ce->state + LRC_STATE_OFFSET.
*/ return (struct parent_scratch *)
(ce->lrc_reg_state +
((__get_parent_scratch_offset(ce) -
LRC_STATE_OFFSET) / sizeof(u32)));
}
static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
{ /* * Check for space in work queue. Caching a value of head pointer in * intel_context structure in order to reduce the number of accesses to shared * GPU memory which may be across a PCIe bus.
*/ #define AVAILABLE_SPACE \
CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE) if (wqi_size > AVAILABLE_SPACE) {
ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
if (wqi_size > AVAILABLE_SPACE) return NULL;
} #undef AVAILABLE_SPACE
/* * We always loop when a send requires a reply (i.e. g2h_len_dw > 0), * so we don't handle the case where we don't get a reply because we * aborted the send due to the channel being busy.
*/
GEM_BUG_ON(g2h_len_dw && !loop);
if (g2h_len_dw)
atomic_inc(&guc->outstanding_submission_g2h);
ret = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop); if (ret && g2h_len_dw)
atomic_dec(&guc->outstanding_submission_g2h);
return ret;
}
int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
atomic_t *wait_var, bool interruptible, long timeout)
{ constint state = interruptible ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
might_sleep();
GEM_BUG_ON(timeout < 0);
if (!atomic_read(wait_var)) return 0;
if (!timeout) return -ETIME;
for (;;) {
prepare_to_wait(&guc->ct.wq, &wait, state);
if (!atomic_read(wait_var)) break;
if (signal_pending_state(state, current)) {
timeout = -EINTR; break;
}
/* * Corner case where requests were sitting in the priority list or a * request resubmitted after the context was banned.
*/ if (unlikely(!intel_context_is_schedulable(ce))) {
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(ce->engine); return 0;
}
if (context_policy_required(ce)) {
err = guc_context_policy_init_v70(ce, false); if (err) return err;
}
spin_lock(&ce->guc_state.lock);
/* * The request / context will be run on the hardware when scheduling * gets enabled in the unblock. For multi-lrc we still submit the * context to move the LRC tails.
*/ if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce))) goto out;
/* * Without multi-lrc KMD does the submission step (moving the * lrc tail) so enabling scheduling is sufficient to submit the * context. This isn't the case in multi-lrc submission as the * GuC needs to move the tails, hence the need for another H2G * to submit a multi-lrc context after enabling scheduling.
*/ if (intel_context_is_parent(ce)) {
action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
err = intel_guc_send_nb(guc, action, len - 1, 0);
}
} elseif (!enabled) {
clr_context_pending_enable(ce);
intel_context_put(ce);
} if (likely(!err))
trace_i915_request_guc_submit(rq);
/* Ensure context is in correct state updating work queue */
GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce));
GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
/* Insert NOOP if this work queue item will wrap the tail pointer. */ if (wqi_size > wq_space_until_wrap(ce)) {
ret = guc_wq_noop_append(ce); if (ret) return ret;
}
wqi = get_wq_pointer(ce, wqi_size); if (!wqi) return -EBUSY;
/* * We expect the front end (execbuf IOCTL) to set this flag on the last * request generated from a multi-BB submission. This indicates to the * backend (GuC interface) that we should submit this context thus * submitting all the requests generated in parallel.
*/ return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
!intel_context_is_schedulable(ce);
}
if (is_multi_lrc_rq(rq)) { /* * We need to coalesce all multi-lrc requests in * a relationship into a single H2G. We are * guaranteed that all of these requests will be * submitted sequentially.
*/ if (multi_lrc_submit(rq)) {
submit = true; goto register_context;
}
} else {
submit = true;
}
}
xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) { /* * Corner case where the ref count on the object is zero but a * deregister G2H was lost. In this case we don't touch the ref * count and finish the destroy of the context.
*/ bool do_put = kref_get_unless_zero(&ce->ref);
xa_unlock(&guc->context_lookup);
if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
(cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))) { /* successful cancel so jump straight to close it */
intel_context_sched_disable_unpin(ce);
}
spin_lock(&ce->guc_state.lock);
/* * Once we are at this point submission_disabled() is guaranteed * to be visible to all callers who set the below flags (see above * flush and flushes in reset_prepare). If submission_disabled() * is set, the caller shouldn't set these flags.
*/
if (pending_enable || destroyed || deregister) {
decr_outstanding_submission_g2h(guc); if (deregister)
guc_signal_context_fence(ce); if (destroyed) {
intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
} if (pending_enable || deregister)
intel_context_put(ce);
}
/* Not mutually exclusive with above if statement. */ if (pending_disable) {
guc_signal_context_fence(ce); if (banned) {
guc_cancel_context_requests(ce);
intel_engine_signal_breadcrumbs(ce->engine);
}
intel_context_sched_disable_unpin(ce);
decr_outstanding_submission_g2h(guc);
if (do_put)
intel_context_put(ce);
xa_lock(&guc->context_lookup);
}
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
/* * GuC stores busyness stats for each engine at context in/out boundaries. A * context 'in' logs execution start time, 'out' adds in -> out delta to total. * i915/kmd accesses 'start', 'total' and 'context id' from memory shared with * GuC. * * __i915_pmu_event_read samples engine busyness. When sampling, if context id * is valid (!= ~0) and start is non-zero, the engine is considered to be * active. For an active engine total busyness = total + (now - start), where * 'now' is the time at which the busyness is sampled. For inactive engine, * total busyness = total. * * All times are captured from GUCPMTIMESTAMP reg and are in gt clock domain. * * The start and total values provided by GuC are 32 bits and wrap around in a * few minutes. Since perf pmu provides busyness as 64 bit monotonically * increasing ns values, there is a need for this implementation to account for * overflows and extend the GuC provided values to 64 bits before returning * busyness to the user. In order to do that, a worker runs periodically at * frequency = 1/8th the time it takes for the timestamp to wrap (i.e. once in * 27 seconds for a gt clock frequency of 19.2 MHz).
*/
if (new_start == lower_32_bits(*prev_start)) return;
/* * When gt is unparked, we update the gt timestamp and start the ping * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt * is unparked, all switched in contexts will have a start time that is * within +/- POLL_TIME_CLKS of the most recent gt_stamp. * * If neither gt_stamp nor new_start has rolled over, then the * gt_stamp_hi does not need to be adjusted, however if one of them has * rolled over, we need to adjust gt_stamp_hi accordingly. * * The below conditions address the cases of new_start rollover and * gt_stamp_last rollover respectively.
*/ if (new_start < gt_stamp_last &&
(new_start - gt_stamp_last) <= POLL_TIME_CLKS)
gt_stamp_hi++;
/* * GuC updates shared memory and KMD reads it. Since this is not synchronized, * we run into a race where the value read is inconsistent. Sometimes the * inconsistency is in reading the upper MSB bytes of the last_in value when * this race occurs. 2 types of cases are seen - upper 8 bits are zero and upper * 24 bits are zero. Since these are non-zero values, it is non-trivial to * determine validity of these values. Instead we read the values multiple times * until they are consistent. In test runs, 3 attempts results in consistent * values. The upper bound is set to 6 attempts and may need to be tuned as per * any new occurrences.
*/ staticvoid __get_engine_usage_record(struct intel_engine_cs *engine,
u32 *last_in, u32 *id, u32 *total)
{ struct iosys_map rec_map = intel_guc_engine_usage_record_map(engine); int i = 0;
/* * Instead of adjusting the total for overflow, just add the * difference from previous sample stats->total_gt_clks
*/ if (total && total != ~0U) {
stats->total_gt_clks += (u32)(total - stats->prev_total);
stats->prev_total = total;
}
}
/* * Unlike the execlist mode of submission total and active times are in terms of * gt clocks. The *now parameter is retained to return the cpu time at which the * busyness was sampled.
*/ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
{ struct intel_engine_guc_stats stats_saved, *stats = &engine->stats.guc; struct i915_gpu_error *gpu_error = &engine->i915->gpu_error; struct intel_gt *gt = engine->gt; struct intel_guc *guc = gt_to_guc(gt);
u64 total, gt_stamp_saved; unsignedlong flags;
u32 reset_count; bool in_reset;
intel_wakeref_t wakeref;
spin_lock_irqsave(&guc->timestamp.lock, flags);
/* * If a reset happened, we risk reading partially updated engine * busyness from GuC, so we just use the driver stored copy of busyness. * Synchronize with gt reset using reset_count and the * I915_RESET_BACKOFF flag. Note that reset flow updates the reset_count * after I915_RESET_BACKOFF flag, so ensure that the reset_count is * usable by checking the flag afterwards.
*/
reset_count = i915_reset_count(gpu_error);
in_reset = test_bit(I915_RESET_BACKOFF, >->reset.flags);
*now = ktime_get();
/* * The active busyness depends on start_gt_clk and gt_stamp. * gt_stamp is updated by i915 only when gt is awake and the * start_gt_clk is derived from GuC state. To get a consistent * view of activity, we query the GuC state only if gt is awake.
*/
wakeref = in_reset ? NULL : intel_gt_pm_get_if_awake(gt); if (wakeref) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp; /* * Update gt_clks, then gt timestamp to simplify the 'gt_stamp - * start_gt_clk' calculation below for active engines.
*/
guc_update_engine_gt_clks(engine);
guc_update_pm_timestamp(guc, now);
intel_gt_pm_put_async(gt, wakeref); if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
guc->timestamp.gt_stamp = gt_stamp_saved;
}
}
total = intel_gt_clock_interval_to_ns(gt, stats->total_gt_clks); if (stats->running) {
u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
total += intel_gt_clock_interval_to_ns(gt, clk);
}
staticvoid guc_cancel_busyness_worker(struct intel_guc *guc)
{ /* * There are many different call stacks that can get here. Some of them * hold the reset mutex. The busyness worker also attempts to acquire the * reset mutex. Synchronously flushing a worker thread requires acquiring * the worker mutex. Lockdep sees this as a conflict. It thinks that the * flush can deadlock because it holds the worker mutex while waiting for * the reset mutex, but another thread is holding the reset mutex and might * attempt to use other worker functions. * * In practice, this scenario does not exist because the busyness worker * does not block waiting for the reset mutex. It does a try-lock on it and * immediately exits if the lock is already held. Unfortunately, the mutex * in question (I915_RESET_BACKOFF) is an i915 implementation which has lockdep * annotation but not to the extent of explaining the 'might lock' is also a * 'does not need to lock'. So one option would be to add more complex lockdep * annotations to ignore the issue (if at all possible). A simpler option is to * just not flush synchronously when a rest in progress. Given that the worker * will just early exit and re-schedule itself anyway, there is no advantage * to running it immediately. * * If a reset is not in progress, then the synchronous flush may be required. * As noted many call stacks lead here, some during suspend and driver unload * which do require a synchronous flush to make sure the worker is stopped * before memory is freed. * * Trying to pass a 'need_sync' or 'in_reset' flag all the way down through * every possible call stack is unfeasible. It would be too intrusive to many * areas that really don't care about the GuC backend. However, there is the * I915_RESET_BACKOFF flag and the gt->reset.mutex can be tested for is_locked. * So just use those. Note that testing both is required due to the hideously * complex nature of the i915 driver's reset code paths. 
* * And note that in the case of a reset occurring during driver unload * (wedged_on_fini), skipping the cancel in reset_prepare/reset_fini (when the * reset flag/mutex are set) is fine because there is another explicit cancel in * intel_guc_submission_fini (when the reset flag/mutex are not).
*/ if (mutex_is_locked(&guc_to_gt(guc)->reset.mutex) ||
test_bit(I915_RESET_BACKOFF, &guc_to_gt(guc)->reset.flags))
cancel_delayed_work(&guc->timestamp.work); else
cancel_delayed_work_sync(&guc->timestamp.work);
}
/* * If resetting a running context, accumulate the active * time as well since there will be no context switch.
*/ if (stats->running) {
u64 clk = guc->timestamp.gt_stamp - stats->start_gt_clk;
/* * Ideally the busyness worker should take a gt pm wakeref because the * worker only needs to be active while gt is awake. However, the * gt_park path cancels the worker synchronously and this complicates * the flow if the worker is also running at the same time. The cancel * waits for the worker and when the worker releases the wakeref, that * would call gt_park and would lead to a deadlock. * * The resolution is to take the global pm wakeref if runtime pm is * already active. If not, we don't need to update the busyness stats as * the stats would already be updated when the gt was parked. * * Note: * - We do not requeue the worker if we cannot take a reference to runtime * pm since intel_guc_busyness_unpark would requeue the worker in the * resume path. * * - If the gt was parked longer than time taken for GT timestamp to roll * over, we ignore those rollovers since we don't care about tracking * the exact GT time. We only care about roll overs when the gt is * active and running workloads. * * - There is a window of time between gt_park and runtime suspend, * where the worker may run. This is acceptable since the worker will * not find any new data to update busyness.
*/
wakeref = intel_runtime_pm_get_if_active(>->i915->runtime_pm); if (!wakeref) return;
/* * Synchronize with gt reset to make sure the worker does not * corrupt the engine/guc stats. NB: can't actually block waiting * for a reset to complete as the reset requires flushing out * this worker thread if started. So waiting would deadlock.
*/
ret = intel_gt_reset_trylock(gt, &srcu); if (ret) goto err_trylock;
/* Assume no engines are running and set running state to false */
__update_guc_busyness_running_state(guc);
/* * There is a race with suspend flow where the worker runs after suspend * and causes an unclaimed register access warning. Cancel the worker * synchronously here.
*/
guc_cancel_busyness_worker(guc);
/* * Before parking, we should sample engine busyness stats if we need to. * We can skip it if we are less than half a ping from the last time we * sampled the busyness stats.
*/ if (guc->timestamp.last_stat_jiffies &&
!time_after(jiffies, guc->timestamp.last_stat_jiffies +
(guc->timestamp.ping_delay / 2))) return;
spin_lock_irqsave(&guc->sched_engine->lock, flags);
sched_engine->tasklet.callback = guc_submission_tasklet;
wmb(); /* Make sure callback visible */ if (!__tasklet_is_enabled(&sched_engine->tasklet) &&
__tasklet_enable(&sched_engine->tasklet)) {
GEM_BUG_ON(!guc->ct.enabled);
/* And kick in case we missed a new request submission. */
tasklet_hi_schedule(&sched_engine->tasklet);
}
spin_unlock_irqrestore(&guc->sched_engine->lock, flags);
}
/* * We want a simple context + ring to execute the breadcrumb update. * We cannot rely on the context being intact across the GPU hang, * so clear it and rebuild just what we need for the breadcrumb. * All pending requests for this context will be zapped, and any * future request will be after userspace has had the opportunity * to recreate its own state.
*/ if (scrub)
lrc_init_regs(ce, engine, true);
/* Rerun the request; its payload has been neutered (if guilty). */
lrc_update_regs(ce, engine, head);
}
static void guc_engine_reset_prepare(struct intel_engine_cs *engine)
{
	/*
	 * Wa_22011802037: in addition to stopping the command streamer, we
	 * need to wait for any pending MI force wakeups to complete.
	 */
	if (!intel_engine_reset_needs_wa_22011802037(engine->gt))
		return;

	intel_engine_stop_cs(engine);
	intel_engine_wait_for_pending_mi_fw(engine);
}
/* Push the request back into the queue for later resubmission. */
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
pl = i915_sched_lookup_priolist(sched_engine, prio);
}
GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
/* * GuC will implicitly mark the context as non-schedulable when it sends * the reset notification. Make sure our state reflects this change. The * context will be marked enabled on resubmission.
*/
spin_lock_irqsave(&ce->guc_state.lock, flags);
clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/* * For each context in the relationship find the hanging request * resetting each context / request as needed
*/ for (i = 0; i < number_children + 1; ++i) { if (!intel_context_is_pinned(ce)) goto next_context;
guilty = false;
rq = intel_context_get_active_request(ce); if (!rq) {
head = ce->ring->tail; goto out_replay;
}
if (i915_request_started(rq))
guilty = stalled & ce->engine->mask;
GEM_BUG_ON(i915_active_is_idle(&ce->active));
head = intel_ring_wrap(ce->ring, rq->head);
__i915_request_reset(rq, guilty);
i915_request_put(rq);
out_replay:
guc_reset_state(ce, head, guilty);
next_context: if (i != number_children)
ce = list_next_entry(ce, parallel.child_link);
}
/* Can be called during boot if GuC fails to load */ if (!sched_engine) return;
/* * Before we call engine->cancel_requests(), we should have exclusive * access to the submission state. This is arranged for us by the * caller disabling the interrupt generation, the tasklet and other * threads that may then access the same state, giving us a free hand * to reset state. However, we still need to let lockdep be aware that * we know this state may be accessed in hardirq context, so we * disable the irq around this manipulation and we want to keep * the spinlock focused on its duties and not accidentally conflate * coverage to the submission's irq state. (Similarly, although we * shouldn't need to disable irq around the manipulation of the * submission's irq state, we also wish to remind ourselves that * it is irq state.)
*/
spin_lock_irqsave(&sched_engine->lock, flags);
/* Flush the queued requests to the timeline list (for retiring). */ while ((rb = rb_first_cached(&sched_engine->queue))) { struct i915_priolist *p = to_priolist(rb);
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
/* * Wedged GT won't respond to any TLB invalidation request. Simply * release all the blocked waiters.
*/
wake_up_all_tlb_invalidate(guc);
}
void intel_guc_submission_reset_finish(struct intel_guc *guc)
{
	int outstanding;

	/* Reset called during driver load or during wedge? */
	if (unlikely(!guc_submission_initialized(guc) ||
		     !intel_guc_is_fw_running(guc) ||
		     intel_gt_is_wedged(guc_to_gt(guc))))
		return;

	/*
	 * It is technically possible for either of these values to be
	 * non-zero here, but it is very unlikely and harmless. Regardless,
	 * log an error so CI can show whether it happens frequently / is a
	 * precursor to taking down the machine.
	 */
	outstanding = atomic_read(&guc->outstanding_submission_g2h);
	if (outstanding)
		guc_err(guc, "Unexpected outstanding GuC to Host response(s) in reset finish: %d\n",
			outstanding);
	atomic_set(&guc->outstanding_submission_g2h, 0);

	/*
	 * The full GT reset will have cleared the TLB caches and flushed the
	 * G2H message queue; we can release all the blocked waiters.
	 */
	wake_up_all_tlb_invalidate(guc);
}
staticint init_tlb_lookup(struct intel_guc *guc)
{ struct intel_guc_tlb_wait *wait; int err;
if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915)) return 0;
xa_init_flags(&guc->tlb_lookup, XA_FLAGS_ALLOC);
wait = kzalloc(sizeof(*wait), GFP_KERNEL); if (!wait) return -ENOMEM;
init_waitqueue_head(&wait->wq);
/* Preallocate a shared id for use under memory pressure. */
err = xa_alloc_cyclic_irq(&guc->tlb_lookup, &guc->serial_slot, wait,
xa_limit_32b, &guc->next_seqno, GFP_KERNEL); if (err < 0) {
kfree(wait); return err;
}
if (!HAS_GUC_TLB_INVALIDATION(guc_to_gt(guc)->i915)) return;
wait = xa_load(&guc->tlb_lookup, guc->serial_slot); if (wait && wait->busy)
guc_err(guc, "Unexpected busy item in tlb_lookup on fini\n");
kfree(wait);
xa_destroy(&guc->tlb_lookup);
}
/* * Set up the memory resources to be shared with the GuC (via the GGTT) * at firmware loading time.
*/ int intel_guc_submission_init(struct intel_guc *guc)
{ struct intel_gt *gt = guc_to_gt(guc); int ret;
if (guc->submission_initialized) return 0;
if (GUC_SUBMIT_VER(guc) < MAKE_GUC_VER(1, 0, 0)) {
ret = guc_lrc_desc_pool_create_v69(guc); if (ret) return ret;
}
ret = init_tlb_lookup(guc); if (ret) goto destroy_pool;
guc->submission_state.guc_ids_bitmap =
bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL); if (!guc->submission_state.guc_ids_bitmap) {
ret = -ENOMEM; goto destroy_tlb;
}
staticint guc_bypass_tasklet_submit(struct intel_guc *guc, struct i915_request *rq)
{ int ret = 0;
__i915_request_submit(rq);
trace_i915_request_in(rq, 0);
if (is_multi_lrc_rq(rq)) { if (multi_lrc_submit(rq)) {
ret = guc_wq_item_append(guc, rq); if (!ret)
ret = guc_add_request(guc, rq);
}
} else {
guc_set_lrc_tail(rq);
ret = guc_add_request(guc, rq);
}
if (unlikely(ret == -EPIPE))
disable_submission(guc);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.