/**
 * enum hl_cs_wait_status - cs wait status
 * @CS_WAIT_STATUS_BUSY: cs was not completed yet
 * @CS_WAIT_STATUS_COMPLETED: cs completed
 * @CS_WAIT_STATUS_GONE: cs completed but fence is already gone
 */
enum hl_cs_wait_status {
	CS_WAIT_STATUS_BUSY,
	CS_WAIT_STATUS_COMPLETED,
	CS_WAIT_STATUS_GONE
};
/*
 * Data used while handling wait/timestamp nodes.
 * The purpose of this struct is to store the needed data for both operations
 * in one variable instead of passing large number of arguments to functions.
 */
struct wait_interrupt_data {
	struct hl_user_interrupt *interrupt;
	struct hl_mmap_mem_buf *buf;
	struct hl_mem_mgr *mmg;
	struct hl_cb *cq_cb;
	u64 ts_handle;
	u64 ts_offset;
	u64 cq_handle;
	u64 cq_offset;
	u64 target_value;
	u64 intr_timeout_us;
};
/*
 * CS outcome store supports the following operations:
 * push outcome - store a recent CS outcome in the store
 * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
 * It uses 2 lists: used list and free list.
 * It has a pre-allocated amount of nodes, each node stores
 * a single CS outcome.
 * Initially, all the nodes are in the free list.
 * On push outcome, a node (any) is taken from the free list, its
 * information is filled in, and the node is moved to the used list.
 * It is possible, that there are no nodes left in the free list.
 * In this case, we will lose some information about old outcomes. We
 * will pop the OLDEST node from the used list, and make it free.
 * On pop, the node is searched for in the used list (using a search
 * index).
 * If found, the node is then removed from the used list, and moved
 * back to the free list. The outcome data that the node contained is
 * returned back to the user.
 */
dev_crit(hdev->dev, "SOB release shouldn't be called here, q_idx: %d, sob_id: %d\n",
hw_sob->q_idx, hw_sob->sob_id);
}
/* Drop a reference on @hw_sob; hl_sob_reset runs when the count hits zero.
 * A NULL pointer is tolerated as a no-op.
 */
void hw_sob_put(struct hl_hw_sob *hw_sob)
{
	if (!hw_sob)
		return;

	kref_put(&hw_sob->kref, hl_sob_reset);
}
/* Drop an error-path reference on @hw_sob; hl_sob_reset_error runs when the
 * count hits zero. A NULL pointer is tolerated as a no-op.
 */
static void hw_sob_put_err(struct hl_hw_sob *hw_sob)
{
	if (hw_sob)
		kref_put(&hw_sob->kref, hl_sob_reset_error);
}
/* Take a reference on @hw_sob. A NULL pointer is tolerated as a no-op. */
void hw_sob_get(struct hl_hw_sob *hw_sob)
{
	if (!hw_sob)
		return;

	kref_get(&hw_sob->kref);
}
/** * hl_gen_sob_mask() - Generates a sob mask to be used in a monitor arm packet * @sob_base: sob base id * @sob_mask: sob user mask, each bit represents a sob offset from sob base * @mask: generated mask * * Return: 0 if given parameters are valid
*/ int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask)
{ int i;
if (sob_mask == 0) return -EINVAL;
if (sob_mask == 0x1) {
*mask = ~(1 << (sob_base & 0x7));
} else { /* find msb in order to verify sob range is valid */ for (i = BITS_PER_BYTE - 1 ; i >= 0 ; i--) if (BIT(i) & sob_mask) break;
if (i > (HL_MAX_SOBS_PER_MONITOR - (sob_base & 0x7) - 1)) return -EINVAL;
/*
 * cs_needs_completion - check whether a CS should get a completion
 *
 * @cs: command submission to check
 *
 * Return: false only for a staged CS that is not the last in its sequence,
 *         true otherwise.
 */
bool cs_needs_completion(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the last CS in sequence should
	 * get a completion, any non staged CS will always get a completion
	 */
	if (cs->staged_cs && !cs->staged_last)
		return false;

	return true;
}
/*
 * cs_needs_timeout - check whether a CS should get a timeout
 *
 * @cs: command submission to check
 *
 * Return: false only for a staged CS that is not the first in its sequence,
 *         true otherwise.
 */
bool cs_needs_timeout(struct hl_cs *cs)
{
	/* In case this is a staged CS, only the first CS in sequence should
	 * get a timeout, any non staged CS will always get a timeout
	 */
	if (cs->staged_cs && !cs->staged_first)
		return false;

	return true;
}
/* Return true when a patched CB exists for this job, i.e. the job runs on an
 * external queue (patched CBs are created for external queues jobs only).
 */
static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
{
	/* Patched CB is created for external queues jobs */
	return (job->queue_type == QUEUE_TYPE_EXT);
}
/* * cs_parser - parse the user command submission * * @hpriv : pointer to the private data of the fd * @job : pointer to the job that holds the command submission info * * The function parses the command submission of the user. It calls the * ASIC specific parser, which returns a list of memory blocks to send * to the device as different command buffers *
*/ staticint cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
{ struct hl_device *hdev = hpriv->hdev; struct hl_cs_parser parser; int rc;
if (is_cb_patched(hdev, job)) { if (!rc) {
job->patched_cb = parser.patched_cb;
job->job_cb_size = parser.patched_cb_size;
job->contains_dma_pkt = parser.contains_dma_pkt;
atomic_inc(&job->patched_cb->cs_cnt);
}
/* * Whether the parsing worked or not, we don't need the * original CB anymore because it was already parsed and * won't be accessed again for this CS
*/
atomic_dec(&job->user_cb->cs_cnt);
hl_cb_put(job->user_cb);
job->user_cb = NULL;
} elseif (!rc) {
job->job_cb_size = job->user_cb_size;
}
if (is_cb_patched(hdev, job)) {
hl_userptr_delete_list(hdev, &job->userptr_list);
/* * We might arrive here from rollback and patched CB wasn't * created, so we need to check it's not NULL
*/ if (job->patched_cb) {
atomic_dec(&job->patched_cb->cs_cnt);
hl_cb_put(job->patched_cb);
}
}
/* For H/W queue jobs, if a user CB was allocated by driver, * the user CB isn't released in cs_parser() and thus should be * released here. This is also true for INT queues jobs which were * allocated by driver.
*/ if (job->is_kernel_allocated_cb &&
(job->queue_type == QUEUE_TYPE_HW || job->queue_type == QUEUE_TYPE_INT)) {
atomic_dec(&job->user_cb->cs_cnt);
hl_cb_put(job->user_cb);
}
/* * This is the only place where there can be multiple threads * modifying the list at the same time
*/
spin_lock(&cs->job_lock);
list_del(&job->cs_node);
spin_unlock(&cs->job_lock);
hl_debugfs_remove_job(hdev, job);
/* We decrement reference only for a CS that gets completion * because the reference was incremented only for this kind of CS * right before it was scheduled. * * In staged submission, only the last CS marked as 'staged_last' * gets completion, hence its release function will be called from here. * As for all the rest CS's in the staged submission which do not get * completion, their CS reference will be decremented by the * 'staged_last' CS during the CS release flow. * All relevant PQ CI counters will be incremented during the CS release * flow by calling 'hl_hw_queue_update_ci'.
*/ if (cs_needs_completion(cs) &&
(job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW)) {
/* In CS based completions, the timestamp is already available, * so no need to extract it from job
*/ if (hdev->asic_prop.completion_mode == HL_COMPLETION_MODE_JOB)
cs->completion_timestamp = job->timestamp;
cs_put(cs);
}
hl_cs_job_put(job);
}
/* * hl_staged_cs_find_first - locate the first CS in this staged submission * * @hdev: pointer to device structure * @cs_seq: staged submission sequence number * * @note: This function must be called under 'hdev->cs_mirror_lock' * * Find and return a CS pointer with the given sequence
*/ struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
{ struct hl_cs *cs;
/*
 * staged_cs_get - get CS reference if this CS is a part of a staged CS
 *
 * @hdev: pointer to device structure
 * @cs: current CS
 *
 * Increment CS reference for every CS in this staged submission except for
 * the CS which gets completion (the 'staged_last' one).
 */
static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
{
	/* Only the last CS in this staged submission will get a completion.
	 * We must increment the reference for all other CS's in this
	 * staged submission.
	 * Once we get a completion we will release the whole staged submission.
	 */
	if (!cs->staged_last)
		cs_get(cs);
}
/*
 * staged_cs_put - put a CS in case it is part of staged submission
 *
 * @hdev: pointer to device structure
 * @cs: CS to put
 *
 * This function decrements a CS reference (for a non completion CS)
 */
static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
{
	/* We release all CS's in a staged submission except the last
	 * CS which we have never incremented its reference.
	 */
	if (!cs_needs_completion(cs))
		cs_put(cs);
}
/* We need to handle tdr only once for the complete staged submission. * Hence, we choose the CS that reaches this function first which is * the CS marked as 'staged_last'. * In case single staged cs was submitted which has both first and last * indications, then "cs_find_first" below will return NULL, since we * removed the cs node from the list before getting here, * in such cases just continue with the cs to cancel it's TDR work.
*/ if (cs->staged_cs && cs->staged_last) {
first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); if (first_cs)
cs = first_cs;
}
spin_unlock(&hdev->cs_mirror_lock);
/* Don't cancel TDR in case this CS was timedout because we might be * running from the TDR context
*/ if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT) return;
if (cs->tdr_active)
cancel_delayed_work_sync(&cs->work_tdr);
spin_lock(&hdev->cs_mirror_lock);
/* queue TDR for next CS */
list_for_each_entry(iter, &hdev->cs_mirror_list, mirror_node) if (cs_needs_timeout(iter)) {
next = iter; break;
}
/*
 * force_complete_multi_cs - complete all contexts that wait on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 */
static void force_complete_multi_cs(struct hl_device *hdev)
{
	int i;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];

		spin_lock(&mcs_compl->lock);

		if (!mcs_compl->used) {
			spin_unlock(&mcs_compl->lock);
			continue;
		}

		/* when calling force complete no context should be waiting on
		 * multi-CS.
		 * We are calling the function as a protection for such case
		 * to free any pending context and print error message
		 */
		dev_err(hdev->dev,
			"multi-CS completion context %d still waiting when calling force completion\n",
			i);
		complete_all(&mcs_compl->completion);
		spin_unlock(&mcs_compl->lock);
	}
}
/*
 * complete_multi_cs - complete all waiting entities on multi-CS
 *
 * @hdev: pointer to habanalabs device structure
 * @cs: CS structure
 * The function signals a waiting entity that has an overlapping stream masters
 * with the completed CS.
 * For example:
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 5. don't send signal as no
 *   common stream master QID
 * - a completed CS worked on stream master QID 4, multi CS completion
 *   is actively waiting on stream master QIDs 3, 4. send signal as stream
 *   master QID 4 is common
 */
static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
{
	struct hl_fence *fence = cs->fence;
	int i;

	/* in case of multi CS check for completion only for the first CS */
	if (cs->staged_cs && !cs->staged_first)
		return;

	for (i = 0; i < MULTI_CS_MAX_USER_CTX; i++) {
		struct multi_cs_completion *mcs_compl;

		mcs_compl = &hdev->multi_cs_completion[i];
		if (!mcs_compl->used)
			continue;

		spin_lock(&mcs_compl->lock);

		/*
		 * complete if:
		 * 1. still waiting for completion
		 * 2. the completed CS has at least one overlapping stream
		 *    master with the stream masters in the completion
		 */
		if (mcs_compl->used &&
				(fence->stream_master_qid_map &
					mcs_compl->stream_master_qid_map)) {
			/* extract the timestamp only of first completed CS */
			if (!mcs_compl->timestamp)
				mcs_compl->timestamp = ktime_to_ns(fence->timestamp);

			complete_all(&mcs_compl->completion);

			/*
			 * Setting mcs_handling_done inside the lock ensures
			 * at least one fence have mcs_handling_done set to
			 * true before wait for mcs finish. This ensures at
			 * least one CS will be set as completed when polling
			 * mcs fences.
			 */
			fence->mcs_handling_done = true;
		}

		spin_unlock(&mcs_compl->lock);
	}
	/* In case CS completed without mcs completion initialized */
	fence->mcs_handling_done = true;
}
staticinlinevoid cs_release_sob_reset_handler(struct hl_device *hdev, struct hl_cs *cs, struct hl_cs_compl *hl_cs_cmpl)
{ /* Skip this handler if the cs wasn't submitted, to avoid putting * the hw_sob twice, since this case already handled at this point, * also skip if the hw_sob pointer wasn't set.
*/ if (!hl_cs_cmpl->hw_sob || !cs->submitted) return;
spin_lock(&hl_cs_cmpl->lock);
/* * we get refcount upon reservation of signals or signal/wait cs for the * hw_sob object, and need to put it when the first staged cs * (which contains the encaps signals) or cs signal/wait is completed.
*/ if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
(hl_cs_cmpl->type == CS_TYPE_WAIT) ||
(hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT) ||
(!!hl_cs_cmpl->encaps_signals)) {
dev_dbg(hdev->dev, "CS 0x%llx type %d finished, sob_id: %d, sob_val: %u\n",
hl_cs_cmpl->cs_seq,
hl_cs_cmpl->type,
hl_cs_cmpl->hw_sob->sob_id,
hl_cs_cmpl->sob_val);
hw_sob_put(hl_cs_cmpl->hw_sob);
if (hl_cs_cmpl->type == CS_TYPE_COLLECTIVE_WAIT)
hdev->asic_funcs->reset_sob_group(hdev,
hl_cs_cmpl->sob_group);
}
/* * Although if we reached here it means that all external jobs have * finished, because each one of them took refcnt to CS, we still * need to go over the internal jobs and complete them. Otherwise, we * will have leaked memory and what's worse, the CS object (and * potentially the CTX object) could be released, while the JOB * still holds a pointer to them (but no reference).
*/
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
hl_complete_job(hdev, job);
if (!cs->submitted) { /* * In case the wait for signal CS was submitted, the fence put * occurs in init_signal_wait_cs() or collective_wait_init_cs() * right before hanging on the PQ.
*/ if (cs->type == CS_TYPE_WAIT ||
cs->type == CS_TYPE_COLLECTIVE_WAIT)
hl_fence_put(cs->signal_fence);
goto out;
}
/* Need to update CI for all queue jobs that does not get completion */
hl_hw_queue_update_ci(cs);
/* remove CS from CS mirror list */
spin_lock(&hdev->cs_mirror_lock);
list_del_init(&cs->mirror_node);
spin_unlock(&hdev->cs_mirror_lock);
cs_handle_tdr(hdev, cs);
if (cs->staged_cs) { /* the completion CS decrements reference for the entire * staged submission
*/ if (cs->staged_last) { struct hl_cs *staged_cs, *tmp_cs;
/* A staged CS will be a member in the list only after it * was submitted. We used 'cs_mirror_lock' when inserting * it to list so we will use it again when removing it
*/ if (cs->submitted) {
spin_lock(&hdev->cs_mirror_lock);
list_del(&cs->staged_cs_node);
spin_unlock(&hdev->cs_mirror_lock);
}
/* decrement refcount to handle when first staged cs * with encaps signals is completed.
*/ if (hl_cs_cmpl->encaps_signals)
kref_put(&hl_cs_cmpl->encaps_sig_hdl->refcount,
hl_encaps_release_handle_and_put_ctx);
}
/* We need to mark an error for not submitted because in that case * the hl fence release flow is different. Mainly, we don't need * to handle hw_sob for signal/wait
*/ if (cs->timedout)
cs->fence->error = -ETIMEDOUT; elseif (cs->aborted)
cs->fence->error = -EIO; elseif (!cs->submitted)
cs->fence->error = -EBUSY;
if (unlikely(cs->skip_reset_on_timeout)) {
dev_err(hdev->dev, "Command submission %llu completed after %llu (s)\n",
cs->sequence,
div_u64(jiffies - cs->submission_time_jiffies, HZ));
}
switch (cs->type) { case CS_TYPE_SIGNAL:
dev_err(hdev->dev, "Signal command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec); break;
case CS_TYPE_WAIT:
dev_err(hdev->dev, "Wait command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec); break;
case CS_TYPE_COLLECTIVE_WAIT:
dev_err(hdev->dev, "Collective Wait command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec); break;
default:
dev_err(hdev->dev, "Command submission %llu has not finished in %u seconds!\n",
cs->sequence, timeout_sec); break;
}
rc = hl_state_dump(hdev); if (rc)
dev_err(hdev->dev, "Error during system state dump %d\n", rc);
cs_cmpl->cs_seq = ctx->cs_sequence;
other = ctx->cs_pending[cs_cmpl->cs_seq &
(hdev->asic_prop.max_pending_cs - 1)];
if (other && !completion_done(&other->completion)) { /* If the following statement is true, it means we have reached * a point in which only part of the staged submission was * submitted and we don't have enough room in the 'cs_pending' * array for the rest of the submission. * This causes a deadlock because this CS will never be * completed as it depends on future CS's for completion.
*/ if (other->cs_sequence == user_sequence)
dev_crit_ratelimited(hdev->dev, "Staged CS %llu deadlock due to lack of resources",
user_sequence);
dev_dbg_ratelimited(hdev->dev, "Rejecting CS because of too many in-flights CS\n");
atomic64_inc(&ctx->cs_counters.max_cs_in_flight_drop_cnt);
atomic64_inc(&cntr->max_cs_in_flight_drop_cnt);
rc = -EAGAIN; goto free_fence;
}
/* * release_reserved_encaps_signals() - release reserved encapsulated signals. * @hdev: pointer to habanalabs device structure * * Release reserved encapsulated signals which weren't un-reserved, or for which a CS with * encapsulated signals wasn't submitted and thus weren't released as part of CS roll-back. * For these signals need also to put the refcount of the H/W SOB which was taken at the * reservation.
*/ staticvoid release_reserved_encaps_signals(struct hl_device *hdev)
{ struct hl_ctx *ctx = hl_get_compute_ctx(hdev); struct hl_cs_encaps_sig_handle *handle; struct hl_encaps_signals_mgr *mgr;
u32 id;
if (!ctx) return;
mgr = &ctx->sig_mgr;
idr_for_each_entry(&mgr->handles, handle, id) if (handle->cs_seq == ULLONG_MAX)
kref_put(&handle->refcount, hl_encaps_release_handle_and_put_sob_ctx);
if (!skip_wq_flush) {
flush_workqueue(hdev->ts_free_obj_wq);
/* flush all completions before iterating over the CS mirror list in * order to avoid a race with the release functions
*/ for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
flush_workqueue(hdev->cs_cmplt_wq);
}
/* Make sure we don't have leftovers in the CS mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
cs_get(cs);
cs->aborted = true;
dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
cs->ctx->asid, cs->sequence);
cs_rollback(hdev, cs);
cs_put(cs);
}
/* We iterate through the user interrupt requests and waking up all * user threads waiting for interrupt completion. We iterate the * list under a lock, this is why all user threads, once awake, * will wait on the same lock and will release the waiting object upon * unlock.
*/
for (i = 0 ; i < prop->user_interrupt_count ; i++) {
interrupt = &hdev->user_interrupt[i];
wake_pending_user_interrupt_threads(interrupt);
}
/* This must be checked here to prevent out-of-bounds access to * hw_queues_props array
*/ if (chunk->queue_index >= asic->max_queues) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index); return -EINVAL;
}
if (hw_queue_prop->type == QUEUE_TYPE_NA) {
dev_err(hdev->dev, "Queue index %d is not applicable\n",
chunk->queue_index); return -EINVAL;
}
if (hw_queue_prop->binned) {
dev_err(hdev->dev, "Queue index %d is binned out\n",
chunk->queue_index); return -EINVAL;
}
if (hw_queue_prop->driver_only) {
dev_err(hdev->dev, "Queue index %d is restricted for the kernel driver\n",
chunk->queue_index); return -EINVAL;
}
/* When hw queue type isn't QUEUE_TYPE_HW, * USER_ALLOC_CB flag shall be referred as "don't care".
*/ if (hw_queue_prop->type == QUEUE_TYPE_HW) { if (chunk->cs_chunk_flags & HL_CS_CHUNK_FLAGS_USER_ALLOC_CB) { if (!(hw_queue_prop->cb_alloc_flags & CB_ALLOC_USER)) {
dev_err(hdev->dev, "Queue index %d doesn't support user CB\n",
chunk->queue_index); return -EINVAL;
}
*is_kernel_allocated_cb = false;
} else { if (!(hw_queue_prop->cb_alloc_flags &
CB_ALLOC_KERNEL)) {
dev_err(hdev->dev, "Queue index %d doesn't support kernel CB\n",
chunk->queue_index); return -EINVAL;
}
if (num_chunks > HL_MAX_JOBS_PER_CS) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Number of chunks can NOT be larger than %d\n",
HL_MAX_JOBS_PER_CS); return -EINVAL;
}
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
GFP_ATOMIC); if (!*cs_chunk_array)
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array), GFP_KERNEL); if (!*cs_chunk_array) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt); return -ENOMEM;
}
size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
kfree(*cs_chunk_array); return -EFAULT;
}
if (cs->staged_first) { /* Staged CS sequence is the first CS sequence */
INIT_LIST_HEAD(&cs->staged_cs_node);
cs->staged_sequence = cs->sequence;
if (cs->encaps_signals)
cs->encaps_sig_hdl_id = encaps_signal_handle;
} else { /* User sequence will be validated in 'hl_hw_queue_schedule_cs' * under the cs_mirror_lock
*/
cs->staged_sequence = sequence;
}
/* Increment CS reference if needed */
staged_cs_get(hdev, cs);
cs->staged_cs = true;
return 0;
}
static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
{ int i;
for (i = 0; i < hdev->stream_master_qid_arr_size; i++) if (qid == hdev->stream_master_qid_arr[i]) return BIT(i);
/* If this is a staged submission we must return the staged sequence * rather than the internal CS sequence
*/ if (cs->staged_cs)
*cs_seq = cs->staged_sequence;
/* Validate ALL the CS chunks before submitting the CS */ for (i = 0 ; i < num_chunks ; i++) { struct hl_cs_chunk *chunk = &cs_chunk_array[i]; enum hl_queue_type queue_type; bool is_kernel_allocated_cb;
/* * store which stream are being used for external/HW * queues of this CS
*/ if (hdev->supports_wait_for_multi_cs)
stream_master_qid_map |=
get_stream_master_qid_mask(hdev,
chunk->queue_index);
}
if (queue_type == QUEUE_TYPE_HW)
using_hw_queues = true;
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb); if (!job) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM; if (is_kernel_allocated_cb) goto release_cb;
/* * Increment CS reference. When CS reference is 0, CS is * done and can be signaled to user and free all its resources * Only increment for JOB on external or H/W queues, because * only for those JOBs we get completion
*/ if (cs_needs_completion(cs) &&
(job->queue_type == QUEUE_TYPE_EXT ||
job->queue_type == QUEUE_TYPE_HW))
cs_get(cs);
hl_debugfs_add_job(hdev, job);
rc = cs_parser(hpriv, job); if (rc) {
atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev, "Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
cs->ctx->asid, cs->sequence, job->id, rc); goto free_cs_object;
}
}
/* We allow a CS with any queue type combination as long as it does * not get a completion
*/ if (int_queues_only && cs_needs_completion(cs)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Reject CS %d.%llu since it contains only internal queues jobs and needs completion\n",
cs->ctx->asid, cs->sequence);
rc = -EINVAL; goto free_cs_object;
}
if (using_hw_queues)
INIT_WORK(&cs->finish_work, cs_completion);
/* * store the (external/HW queues) streams used by the CS in the * fence object for multi-CS completion
*/ if (hdev->supports_wait_for_multi_cs)
cs->fence->stream_master_qid_map = stream_master_qid_map;
rc = hl_hw_queue_schedule_cs(cs); if (rc) { if (rc != -EAGAIN)
dev_err(hdev->dev, "Failed to submit CS %d.%llu to H/W queues, error %d\n",
cs->ctx->asid, cs->sequence, rc); goto free_cs_object;
}
release_cb:
atomic_dec(&cb->cs_cnt);
hl_cb_put(cb);
free_cs_object:
cs_rollback(hdev, cs);
*cs_seq = ULLONG_MAX; /* The path below is both for good and erroneous exits */
put_cs: /* We finished with the CS in this function, so put the ref */
cs_put(cs);
free_cs_chunk_array:
kfree(cs_chunk_array);
out: return rc;
}
if (hdev->supports_ctx_switch)
do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
mutex_lock(&hpriv->restore_phase_mutex);
if (do_ctx_switch) {
rc = hdev->asic_funcs->context_switch(hdev, ctx->asid); if (rc) {
dev_err_ratelimited(hdev->dev, "Failed to switch to context %d, rejecting CS! %d\n",
ctx->asid, rc); /* * If we timedout, or if the device is not IDLE * while we want to do context-switch (-EBUSY), * we need to soft-reset because QMAN is * probably stuck. However, we can't call to * reset here directly because of deadlock, so * need to do it at the very end of this * function
*/ if ((rc == -ETIMEDOUT) || (rc == -EBUSY))
need_soft_reset = true;
mutex_unlock(&hpriv->restore_phase_mutex); goto out;
}
}
/* * hl_cs_signal_sob_wraparound_handler: handle SOB value wrapaound case. * if the SOB value reaches the max value move to the other SOB reserved * to the queue. * @hdev: pointer to device structure * @q_idx: stream queue index * @hw_sob: the H/W SOB used in this signal CS. * @count: signals count * @encaps_sig: tells whether it's reservation for encaps signals or not. * * Note that this function must be called while hw_queues_lock is taken.
*/ int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx, struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig)
/* check for wraparound */ if (prop->next_sob_val + count >= HL_MAX_SOB_VAL) { /* * Decrement as we reached the max value. * The release function won't be called here as we've * just incremented the refcount right before calling this * function.
*/
hw_sob_put_err(sob);
/* * check the other sob value, if it still in use then fail * otherwise make the switch
*/
other_sob_offset = (prop->curr_sob_offset + 1) % HL_RSVD_SOBS;
other_sob = &prop->hw_sob[other_sob_offset];
/* * next_sob_val always points to the next available signal * in the sob, so in encaps signals it will be the next one * after reserving the required amount.
*/ if (encaps_sig)
prop->next_sob_val = count + 1; else
prop->next_sob_val = count;
/* only two SOBs are currently in use */
prop->curr_sob_offset = other_sob_offset;
*hw_sob = other_sob;
/* * check if other_sob needs reset, then do it before using it * for the reservation or the next signal cs. * we do it here, and for both encaps and regular signal cs * cases in order to avoid possible races of two kref_put * of the sob which can occur at the same time if we move the * sob reset(kref_put) to cs_do_release function. * in addition, if we have combination of cs signal and * encaps, and at the point we need to reset the sob there was * no more reservations and only signal cs keep coming, * in such case we need signal_cs to put the refcount and * reset the sob.
*/ if (other_sob->need_reset)
hw_sob_put(other_sob);
if (encaps_sig) { /* set reset indication for the sob */
sob->need_reset = true;
hw_sob_get(other_sob);
}
dev_dbg(hdev->dev, "switched to SOB %d, q_idx: %d\n",
prop->curr_sob_offset, q_idx);
} else {
prop->next_sob_val += count;
}
if (encaps_signals) {
*signal_seq = chunk->encaps_signal_seq; return 0;
}
signal_seq_arr_len = chunk->num_signal_seq_arr;
/* currently only one signal seq is supported */ if (signal_seq_arr_len != 1) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Wait for signal CS supports only one signal CS seq\n"); return -EINVAL;
}
signal_seq_arr = kmalloc_array(signal_seq_arr_len, sizeof(*signal_seq_arr),
GFP_ATOMIC); if (!signal_seq_arr)
signal_seq_arr = kmalloc_array(signal_seq_arr_len, sizeof(*signal_seq_arr),
GFP_KERNEL); if (!signal_seq_arr) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt); return -ENOMEM;
}
size_to_copy = signal_seq_arr_len * sizeof(*signal_seq_arr); if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Failed to copy signal seq array from user\n");
rc = -EFAULT; goto out;
}
/* currently it is guaranteed to have only one signal seq */
*signal_seq = signal_seq_arr[0];
job = hl_cs_allocate_job(hdev, q_type, true); if (!job) {
atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n"); return -ENOMEM;
}
if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT)
&& cs->encaps_signals)
job->encaps_sig_wait_offset = encaps_signal_offset; /* * No need in parsing, user CB is the patched CB. * We call hl_cb_destroy() out of two reasons - we don't need the CB in * the CB idr anymore and to decrement its refcount as it was * incremented inside hl_cb_kernel_create().
*/
job->patched_cb = job->user_cb;
job->job_cb_size = job->user_cb_size;
hl_cb_destroy(&hdev->kernel_mem_mgr, cb->buf->handle);
/* increment refcount as for external queues we get completion */
cs_get(cs);
if (!hw_queue_prop->supports_sync_stream) {
dev_err(hdev->dev, "Queue index %d does not support sync stream operations\n",
q_idx);
rc = -EINVAL; goto out;
}
/* * Increment the SOB value by count by user request * to reserve those signals * check if the signals amount to reserve is not exceeding the max sob * value, if yes then switch sob.
*/
rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, count, true); if (rc) {
dev_err(hdev->dev, "Failed to switch SOB\n");
hdev->asic_funcs->hw_queues_unlock(hdev);
rc = -EINVAL; goto remove_idr;
} /* set the hw_sob to the handle after calling the sob wraparound handler * since sob could have changed.
*/
handle->hw_sob = hw_sob;
/* store the current sob value for unreserve validity check, and * signal offset support
*/
handle->pre_sob_val = prop->next_sob_val - handle->count;
/* Check if sob_val got out of sync due to other * signal submission requests which were handled * between the reserve-unreserve calls or SOB switch * upon reaching SOB max value.
*/ if (encaps_sig_hdl->pre_sob_val + encaps_sig_hdl->count
!= prop->next_sob_val ||
sob_addr != encaps_sig_hdl->hw_sob->sob_addr) {
dev_err(hdev->dev, "Cannot unreserve signals, SOB val ran out of sync, expected: %u, actual val: %u\n",
encaps_sig_hdl->pre_sob_val,
(prop->next_sob_val - encaps_sig_hdl->count));
if (!hw_queue_prop->supports_sync_stream) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Queue index %d does not support sync stream operations\n",
q_idx);
rc = -EINVAL; goto free_cs_chunk_array;
}
if (cs_type == CS_TYPE_COLLECTIVE_WAIT) { if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Queue index %d is invalid\n", q_idx);
rc = -EINVAL; goto free_cs_chunk_array;
}
if (!hdev->nic_ports_mask) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Collective operations not supported when NIC ports are disabled");
rc = -EINVAL; goto free_cs_chunk_array;
}
if (is_wait_cs) {
rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq,
ctx, cs_encaps_signals); if (rc) goto free_cs_chunk_array;
if (cs_encaps_signals) { /* check if cs sequence has encapsulated * signals handle
*/ struct idr *idp;
u32 id;
spin_lock(&ctx->sig_mgr.lock);
idp = &ctx->sig_mgr.handles;
idr_for_each_entry(idp, encaps_sig_hdl, id) { if (encaps_sig_hdl->cs_seq == signal_seq) { /* get refcount to protect removing this handle from idr, * needed when multiple wait cs are used with offset * to wait on reserved encaps signals. * Since kref_put of this handle is executed outside the * current lock, it is possible that the handle refcount * is 0 but it yet to be removed from the list. In this * case need to consider the handle as not valid.
*/ if (kref_get_unless_zero(&encaps_sig_hdl->refcount))
handle_found = true; break;
}
}
spin_unlock(&ctx->sig_mgr.lock);
if (!handle_found) { /* treat as signal CS already finished */
dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
signal_seq);
rc = 0; goto free_cs_chunk_array;
}
/* validate also the signal offset value */ if (chunk->encaps_signal_offset >
encaps_sig_hdl->count) {
dev_err(hdev->dev, "offset(%u) value exceed max reserved signals count(%u)!\n",
chunk->encaps_signal_offset,
encaps_sig_hdl->count);
rc = -EINVAL; goto free_cs_chunk_array;
}
}
sig_fence = hl_ctx_get_fence(ctx, signal_seq); if (IS_ERR(sig_fence)) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Failed to get signal CS with seq 0x%llx\n",
signal_seq);
rc = PTR_ERR(sig_fence); goto free_cs_chunk_array;
}
if (!sig_fence) { /* signal CS already finished */
rc = 0; goto free_cs_chunk_array;
}
if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL &&
!staged_cs_with_encaps_signals) {
atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "CS seq 0x%llx is not of a signal/encaps-signal CS\n",
signal_seq);
hl_fence_put(sig_fence);
rc = -EINVAL; goto free_cs_chunk_array;
}
if (completion_done(&sig_fence->completion)) { /* signal CS already finished */
hl_fence_put(sig_fence);
rc = 0; goto free_cs_chunk_array;
}
}
rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout); if (rc) { if (is_wait_cs)
hl_fence_put(sig_fence);
goto free_cs_chunk_array;
}
/* * Save the signal CS fence for later initialization right before * hanging the wait CS on the queue. * for encaps signals case, we save the cs sequence and handle pointer * for later initialization.
*/ if (is_wait_cs) {
cs->signal_fence = sig_fence; /* store the handle pointer, so we don't have to * look for it again, later on the flow * when we need to set SOB info in hw_queue.
*/ if (cs->encaps_signals)
cs->encaps_sig_hdl = encaps_sig_hdl;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.