queue_size_mask = aq->depth - 1;
pi = aq->sq.pc & queue_size_mask;
ctx_id = efa_com_alloc_ctx_id(aq);
/* cmd_id LSBs are the ctx_id and MSBs are entropy bits from pc */
cmd_id = ctx_id & queue_size_mask;
cmd_id |= aq->sq.pc & ~queue_size_mask;
cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
ci = aq->cq.cc & queue_size_mask;
phase = aq->cq.phase;
cqe = &aq->cq.entries[ci];
/* Go over all the completions */ while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) { /* * Do not read the rest of the completion entry before the * phase bit was validated
*/
dma_rmb();
err = efa_com_handle_single_admin_completion(aq, cqe); if (!err)
comp_cmds++;
aq->cq.cc++;
ci++; if (ci == aq->depth) {
ci = 0;
phase = !phase;
}
/*
 * Map an admin-command completion status byte reported by the device
 * into a negative errno understood by the rest of the kernel.
 *
 * @comp_status: status field taken from the admin completion descriptor.
 *
 * Returns 0 on EFA_ADMIN_SUCCESS, a specific negative errno for known
 * failure codes, and -EINVAL for malformed/unknown statuses.
 */
static int efa_com_comp_status_to_errno(u8 comp_status)
{
	switch (comp_status) {
	case EFA_ADMIN_SUCCESS:
		return 0;
	case EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case EFA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case EFA_ADMIN_BAD_OPCODE:
	case EFA_ADMIN_MALFORMED_REQUEST:
	case EFA_ADMIN_ILLEGAL_PARAMETER:
	case EFA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	default:
		return -EINVAL;
	}
}
while (1) {
spin_lock_irqsave(&aq->cq.lock, flags);
efa_com_handle_admin_completion(aq);
spin_unlock_irqrestore(&aq->cq.lock, flags);
if (comp_ctx->status != EFA_CMD_SUBMITTED) break;
if (time_is_before_jiffies(timeout)) {
ibdev_err_ratelimited(
aq->efa_dev, "Wait for completion (polling) timeout\n"); /* EFA didn't have any completion */
atomic64_inc(&aq->stats.no_completion);
/* * In case the command wasn't completed find out the root cause. * There might be 2 kinds of errors * 1) No completion (timeout reached) * 2) There is completion but the device didn't get any msi-x interrupt.
*/ if (comp_ctx->status == EFA_CMD_SUBMITTED) {
spin_lock_irqsave(&aq->cq.lock, flags);
efa_com_handle_admin_completion(aq);
spin_unlock_irqrestore(&aq->cq.lock, flags);
atomic64_inc(&aq->stats.no_completion);
if (comp_ctx->status == EFA_CMD_COMPLETED)
ibdev_err_ratelimited(
aq->efa_dev, "The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (ctx: 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx->cmd_opcode, comp_ctx->status,
comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc); else
ibdev_err_ratelimited(
aq->efa_dev, "The device didn't send any completion for admin cmd %s(%d) status %d (ctx 0x%p, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
efa_com_cmd_str(comp_ctx->cmd_opcode),
comp_ctx->cmd_opcode, comp_ctx->status,
comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
/* * There are two types to wait for completion. * Polling mode - wait until the completion is available. * Async mode - wait on wait queue until the completion is ready * (or the timeout expired). * It is expected that the IRQ called efa_com_handle_admin_completion * to mark the completions.
*/ staticint efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx, struct efa_com_admin_queue *aq)
{ if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state)) return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);
/** * efa_com_cmd_exec - Execute admin command * @aq: admin queue. * @cmd: the admin command to execute. * @cmd_size: the command size. * @comp: command completion return entry. * @comp_size: command completion size. * Submit an admin command and then wait until the device will return a * completion. * The completion will be copied into comp. * * @return - 0 on success, negative value on failure.
*/ int efa_com_cmd_exec(struct efa_com_admin_queue *aq, struct efa_admin_aq_entry *cmd,
size_t cmd_size, struct efa_admin_acq_entry *comp,
size_t comp_size)
{ struct efa_comp_ctx *comp_ctx; int err;
might_sleep();
/* In case of queue FULL */
down(&aq->avail_cmds);
for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
atomic64_set(s, 0);
}
/** * efa_com_admin_init - Init the admin and the async queues * @edev: EFA communication layer struct * @aenq_handlers: Those handlers to be called upon event. * * Initialize the admin submission and completion queues. * Initialize the asynchronous events notification queues. * * @return - 0 on success, negative value on failure.
*/ int efa_com_admin_init(struct efa_com_dev *edev, struct efa_aenq_handlers *aenq_handlers)
{ struct efa_com_admin_queue *aq = &edev->aq;
u32 timeout;
u32 dev_sts;
u32 cap; int err;
dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF); if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) {
ibdev_err(edev->efa_dev, "Device isn't ready, abort com init %#x\n", dev_sts); return -ENODEV;
}
/** * efa_com_admin_q_comp_intr_handler - admin queue interrupt handler * @edev: EFA communication layer struct * * This method goes over the admin completion queue and wakes up * all the pending threads that wait on the commands wait event. * * Note: Should be called after MSI-X interrupt.
*/ void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
{ unsignedlong flags;
/*
 * efa_com_get_specific_aenq_cb:
 * Look up the AENQ callback registered for the given event group.
 * Falls back to the unimplemented-event handler when the group index is
 * out of range or no callback was registered for it.
 */
static efa_aenq_handler efa_com_get_specific_aenq_cb(struct efa_com_dev *edev,
						     u16 group)
{
	struct efa_aenq_handlers *cbs = edev->aenq.aenq_handlers;
	efa_aenq_handler handler = NULL;

	if (group < EFA_MAX_HANDLERS)
		handler = cbs->handlers[group];

	return handler ? handler : cbs->unimplemented_handler;
}
/** * efa_com_aenq_intr_handler - AENQ interrupt handler * @edev: EFA communication layer struct * @data: Data of interrupt handler. * * Go over the async event notification queue and call the proper aenq handler.
*/ void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data)
{ struct efa_admin_aenq_common_desc *aenq_common; struct efa_com_aenq *aenq = &edev->aenq; struct efa_admin_aenq_entry *aenq_e;
efa_aenq_handler handler_cb;
u32 processed = 0;
u8 phase;
u32 ci;
ci = aenq->cc & (aenq->depth - 1);
phase = aenq->phase;
aenq_e = &aenq->entries[ci]; /* Get first entry */
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */ while ((READ_ONCE(aenq_common->flags) &
EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) { /* * Do not read the rest of the completion entry before the * phase bit was validated
*/
dma_rmb();
/* Handle specific event*/
handler_cb = efa_com_get_specific_aenq_cb(edev,
aenq_common->group);
handler_cb(data, aenq_e); /* call the actual event handler*/
/* Get next event entry */
ci++;
processed++;
if (ci == aenq->depth) {
ci = 0;
phase = !phase;
}
aenq_e = &aenq->entries[ci];
aenq_common = &aenq_e->aenq_common_desc;
}
aenq->cc += processed;
aenq->phase = phase;
/* Don't update aenq doorbell if there weren't any processed events */ if (!processed) return;
/* barrier not needed in case of writel */
writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
}
/* * Make sure the EFA version and the controller version are at least * as the driver expects
*/
ver = efa_com_reg_read32(edev, EFA_REGS_VERSION_OFF);
ctrl_ver = efa_com_reg_read32(edev,
EFA_REGS_CONTROLLER_VERSION_OFF);
EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION,
EFA_ADMIN_API_VERSION_MAJOR);
EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION,
EFA_ADMIN_API_VERSION_MINOR); if (ver < min_ver) {
ibdev_err(edev->efa_dev, "EFA version is lower than the minimal version the driver supports\n"); return -EOPNOTSUPP;
}
EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION,
EFA_CTRL_MAJOR);
EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION,
EFA_CTRL_MINOR);
EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION,
EFA_CTRL_SUB_MINOR); /* Validate the ctrl version without the implementation ID */ if (ctrl_ver_masked < min_ctrl_ver) {
ibdev_err(edev->efa_dev, "EFA ctrl version is lower than the minimal ctrl version the driver supports\n"); return -EOPNOTSUPP;
}
return 0;
}
/** * efa_com_get_dma_width - Retrieve physical dma address width the device * supports. * @edev: EFA communication layer struct * * Retrieve the maximum physical address bits the device can handle. * * @return: > 0 on Success and negative value otherwise.
*/ int efa_com_get_dma_width(struct efa_com_dev *edev)
{
u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF); int width;
for (i = 0; i < timeout; i++) {
val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on) return 0;
ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
msleep(EFA_POLL_INTERVAL_MS);
}
return -ETIME;
}
/** * efa_com_dev_reset - Perform device FLR to the device. * @edev: EFA communication layer struct * @reset_reason: Specify what is the trigger for the reset in case of an error. * * @return - 0 on success, negative value on failure.
*/ int efa_com_dev_reset(struct efa_com_dev *edev, enum efa_regs_reset_reason_types reset_reason)
{
u32 stat, timeout, cap;
u32 reset_val = 0; int err;
stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
/* Go over all the events */ while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) { /* * Do not read the rest of the completion entry before the * phase bit was validated
*/
dma_rmb();
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.