/* * Writes to the new ring element must be visible to the hardware * before letting h/w know there is new element to fetch.
*/
dma_wmb();
*ring->ctxt_wp = cpu_to_le64(db);
/* * If execution environment is specified, remove only those devices that * started in them based on ee_mask for the channels as we move on to a * different execution environment
*/ if (data)
ee = *(enum mhi_ee_type *)data;
/* * For the suspend and resume case, this function will get called * without mhi_unregister_controller(). Hence, we need to drop the * references to mhi_dev created for ul and dl channels. We can * be sure that there will be no instances of mhi_dev left after * this.
*/ if (ul_chan) { if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) return 0;
put_device(&ul_chan->mhi_dev->dev);
}
if (dl_chan) { if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) return 0;
put_device(&dl_chan->mhi_dev->dev);
}
dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
mhi_dev->name);
/* Notify the client and remove the device from MHI bus */
device_del(dev);
put_device(dev);
/* Check next channel if it matches */ if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
i++;
mhi_chan++; if (mhi_chan->dir == DMA_TO_DEVICE) {
mhi_dev->ul_chan = mhi_chan;
mhi_dev->ul_chan_id = mhi_chan->chan;
} else {
mhi_dev->dl_chan = mhi_chan;
mhi_dev->dl_chan_id = mhi_chan->chan;
}
get_device(&mhi_dev->dev);
mhi_chan->mhi_dev = mhi_dev;
}
}
/* Channel name is same for both UL and DL */
mhi_dev->name = mhi_chan->name;
dev_set_name(&mhi_dev->dev, "%s_%s",
dev_name(&mhi_cntrl->mhi_dev->dev),
mhi_dev->name);
/* Init wakeup source if available */ if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
device_init_wakeup(&mhi_dev->dev, true);
ret = device_add(&mhi_dev->dev); if (ret)
put_device(&mhi_dev->dev);
}
}
/* * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq() * and by that time mhi_ctxt() would've freed. So check for the existence of mhi_ctxt * before handling the IRQs.
*/ if (!mhi_cntrl->mhi_ctxt) {
dev_dbg(&mhi_cntrl->mhi_dev->dev, "mhi_ctxt has been freed\n"); return IRQ_HANDLED;
}
if (pm_state != MHI_PM_SYS_ERR_DETECT) goto exit_intvec;
switch (ee) { case MHI_EE_RDDM: /* proceed if power down is not already in progress */ if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) {
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
mhi_cntrl->ee = ee;
wake_up_all(&mhi_cntrl->state_event);
} break; case MHI_EE_PBL: case MHI_EE_EDL: case MHI_EE_PTHRU:
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
mhi_cntrl->ee = ee;
wake_up_all(&mhi_cntrl->state_event);
mhi_pm_sys_err_handler(mhi_cntrl); break; default:
wake_up_all(&mhi_cntrl->state_event);
mhi_pm_sys_err_handler(mhi_cntrl); break;
}
/* * If it's a DB Event then we need to grab the lock * with preemption disabled and as a write because we * have to update db register and there are chances that * another thread could be doing the same.
*/ if (ev_code >= MHI_EV_CC_OOB)
write_lock_irqsave(&mhi_chan->lock, flags); else
read_lock_bh(&mhi_chan->lock);
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) goto end_process_tx_event;
switch (ev_code) { case MHI_EV_CC_OVERFLOW: case MHI_EV_CC_EOB: case MHI_EV_CC_EOT:
{
dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); struct mhi_ring_element *local_rp, *ev_tre; void *dev_rp, *next_rp; struct mhi_buf_info *buf_info;
u16 xfer_len;
if (!is_valid_ring_ptr(tre_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev, "Event element points outside of the tre ring\n"); break;
} /* Get the TRB this event points to */
ev_tre = mhi_to_virtual(tre_ring, ptr);
next_rp = local_rp + 1; if (next_rp >= tre_ring->base + tre_ring->len)
next_rp = tre_ring->base; if (dev_rp != next_rp && !MHI_TRE_DATA_GET_CHAIN(local_rp)) {
dev_err(&mhi_cntrl->mhi_dev->dev, "Event element points to an unexpected TRE\n"); break;
}
while (local_rp != dev_rp) {
buf_info = buf_ring->rp; /* If it's the last TRE, get length from the event */ if (local_rp == ev_tre)
xfer_len = MHI_TRE_GET_EV_LEN(event); else
xfer_len = buf_info->len;
/* Unmap if it's not pre-mapped by client */ if (likely(!buf_info->pre_mapped))
mhi_cntrl->unmap_single(mhi_cntrl, buf_info);
result.buf_addr = buf_info->cb_buf;
/* truncate to buf len if xfer_len is larger */
result.bytes_xferd =
min_t(u16, xfer_len, buf_info->len);
mhi_del_ring_element(mhi_cntrl, buf_ring);
mhi_del_ring_element(mhi_cntrl, tre_ring);
local_rp = tre_ring->rp;
if (mhi_chan->dir == DMA_TO_DEVICE) {
atomic_dec(&mhi_cntrl->pending_pkts); /* Release the reference got from mhi_queue() */
mhi_cntrl->runtime_put(mhi_cntrl);
}
/* * Recycle the buffer if buffer is pre-allocated, * if there is an error, not much we can do apart * from dropping the packet
*/ if (mhi_chan->pre_alloc) { if (mhi_queue_buf(mhi_chan->mhi_dev,
mhi_chan->dir,
buf_info->cb_buf,
buf_info->len, MHI_EOT)) {
dev_err(dev, "Error recycling buffer for chan:%d\n",
mhi_chan->chan);
kfree(buf_info->cb_buf);
}
}
read_lock_bh(&mhi_chan->lock);
} break;
} /* CC_EOT */ case MHI_EV_CC_OOB: case MHI_EV_CC_DB_MODE:
{ unsignedlong pm_lock_flags;
/* truncate to buf len if xfer_len is larger */
result.bytes_xferd = min_t(u16, xfer_len, buf_info->len);
result.buf_addr = buf_info->cb_buf;
result.dir = mhi_chan->dir;
read_lock_bh(&mhi_chan->lock);
if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) goto end_process_rsc_event;
WARN_ON(!buf_info->used);
/* notify the client */
mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result);
/* * Note: We're arbitrarily incrementing RP even though, completion * packet we processed might not be the same one, reason we can do this * is because device guaranteed to cache descriptors in order it * receive, so even though completion event is different we can re-use * all descriptors in between. * Example: * Transfer Ring has descriptors: A, B, C, D * Last descriptor host queue is D (WP) and first descriptor * host queue is A (RP). * The completion event we just serviced is descriptor C. * Then we can safely queue descriptors to replace A, B, and C * even though host did not receive any completions.
*/
mhi_del_ring_element(mhi_cntrl, tre_ring);
buf_info->used = false;
/* * This is a quick check to avoid unnecessary event processing * in case MHI is already in error state, but it's still possible * to transition to error state while processing events
*/ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) return -EIO;
if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); return -EIO;
}
break;
} case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
mhi_process_cmd_completion(mhi_cntrl, local_rp); break; case MHI_PKT_TYPE_EE_EVENT:
{ enum dev_st_transition st = DEV_ST_TRANSITION_MAX; enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp);
dev_dbg(dev, "Received EE event: %s\n",
TO_MHI_EXEC_STR(event)); switch (event) { case MHI_EE_SBL:
st = DEV_ST_TRANSITION_SBL; break; case MHI_EE_WFW: case MHI_EE_AMSS:
st = DEV_ST_TRANSITION_MISSION_MODE; break; case MHI_EE_FP:
st = DEV_ST_TRANSITION_FP; break; case MHI_EE_RDDM:
mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_cntrl->ee = event;
write_unlock_irq(&mhi_cntrl->pm_lock);
wake_up_all(&mhi_cntrl->state_event); break; default:
dev_err(dev, "Unhandled EE event: 0x%x\n", type);
} if (st != DEV_ST_TRANSITION_MAX)
mhi_queue_state_transition(mhi_cntrl, st);
break;
} case MHI_PKT_TYPE_TX_EVENT:
chan = MHI_TRE_GET_EV_CHID(local_rp);
WARN_ON(chan >= mhi_cntrl->max_chan);
/* * Only process the event ring elements whose channel * ID is within the maximum supported range.
*/ if (chan < mhi_cntrl->max_chan) {
mhi_chan = &mhi_cntrl->mhi_chan[chan]; if (!mhi_chan->configured) break;
parse_xfer_event(mhi_cntrl, local_rp, mhi_chan);
} break; default:
dev_err(dev, "Unhandled event type: %d\n", type); break;
}
ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); return -EIO;
}
dev_rp = mhi_to_virtual(ev_ring, ptr);
count++;
}
read_lock_bh(&mhi_cntrl->pm_lock);
/* Ring EV DB only if there is any pending element to process */ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
mhi_ring_er_db(mhi_event);
read_unlock_bh(&mhi_cntrl->pm_lock);
while (dev_rp != local_rp && event_quota > 0) { enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
trace_mhi_data_event(mhi_cntrl, local_rp);
chan = MHI_TRE_GET_EV_CHID(local_rp);
WARN_ON(chan >= mhi_cntrl->max_chan);
/* * Only process the event ring elements whose channel * ID is within the maximum supported range.
*/ if (chan < mhi_cntrl->max_chan &&
mhi_cntrl->mhi_chan[chan].configured) {
mhi_chan = &mhi_cntrl->mhi_chan[chan];
ptr = le64_to_cpu(er_ctxt->rp); if (!is_valid_ring_ptr(ev_ring, ptr)) {
dev_err(&mhi_cntrl->mhi_dev->dev, "Event ring rp points outside of the event ring\n"); return -EIO;
}
/* Ring EV DB only if there is any pending element to process */ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count)
mhi_ring_er_db(mhi_event);
read_unlock_bh(&mhi_cntrl->pm_lock);
/* * We can check PM state w/o a lock here because there is no way * PM state can change from reg access valid to no access while this * thread being executed.
*/ if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { /* * We may have a pending event but not allowed to * process it since we are probably in a suspended state, * so trigger a resume.
*/
mhi_trigger_resume(mhi_cntrl);
return;
}
/* Process ctrl events */
ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
/* * We received an IRQ but no events to process, maybe device went to * SYS_ERR state? Check the state to confirm.
*/ if (!ret) {
write_lock_irq(&mhi_cntrl->pm_lock);
state = mhi_get_mhi_state(mhi_cntrl); if (state == MHI_STATE_SYS_ERR) {
dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
MHI_PM_SYS_ERR_DETECT);
}
write_unlock_irq(&mhi_cntrl->pm_lock); if (pm_state == MHI_PM_SYS_ERR_DETECT)
mhi_pm_sys_err_handler(mhi_cntrl);
}
}
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) return -EIO;
ret = mhi_is_ring_full(mhi_cntrl, tre_ring); if (unlikely(ret)) return -EAGAIN;
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); if (unlikely(ret)) return ret;
read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
/* Packet is queued, take a usage ref to exit M3 if necessary * for host->device buffer, balanced put is done on buffer completion * for device->host buffer, balanced put is after ringing the DB
*/
mhi_cntrl->runtime_get(mhi_cntrl);
cmd = MHI_CMD_RESET_CHAN; break; case MHI_CH_STATE_TYPE_STOP: if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) return -EINVAL;
cmd = MHI_CMD_STOP_CHAN; break; case MHI_CH_STATE_TYPE_START: if (mhi_chan->ch_state != MHI_CH_STATE_STOP &&
mhi_chan->ch_state != MHI_CH_STATE_DISABLED) return -EINVAL;
cmd = MHI_CMD_START_CHAN; break; default:
dev_err(dev, "%d: Channel state update to %s not allowed\n",
mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); return -EINVAL;
}
/* bring host and device out of suspended states */
ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); if (ret) return ret;
mhi_cntrl->runtime_get(mhi_cntrl);
reinit_completion(&mhi_chan->completion);
ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); if (ret) {
dev_err(dev, "%d: Failed to send %s channel command\n",
mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); goto exit_channel_update;
}
ret = wait_for_completion_timeout(&mhi_chan->completion,
msecs_to_jiffies(mhi_cntrl->timeout_ms)); if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) {
dev_err(dev, "%d: Failed to receive %s channel command completion\n",
mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
ret = -EIO; goto exit_channel_update;
}
if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); goto exit_unprepare_channel;
}
/* no more processing events for this channel */
ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
MHI_CH_STATE_TYPE_RESET); if (ret)
dev_err(dev, "%d: Failed to reset channel, still resetting\n",
mhi_chan->chan);
staticint mhi_prepare_channel(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, unsignedint flags)
{ int ret = 0; struct device *dev = &mhi_chan->mhi_dev->dev;
if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) {
dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n",
TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); return -ENOTCONN;
}
mutex_lock(&mhi_chan->mutex);
/* Check of client manages channel context for offload channels */ if (!mhi_chan->offload_ch) {
ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); if (ret) goto error_init_chan;
}
ret = mhi_update_channel_state(mhi_cntrl, mhi_chan,
MHI_CH_STATE_TYPE_START); if (ret) goto error_pm_state;
if (mhi_chan->dir == DMA_FROM_DEVICE)
mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
/* Pre-allocate buffer for xfer ring */ if (mhi_chan->pre_alloc) { int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
&mhi_chan->tre_ring);
size_t len = mhi_cntrl->buffer_len;
while (nr_el--) { void *buf; struct mhi_buf_info info = { };
buf = kmalloc(len, GFP_KERNEL); if (!buf) {
ret = -ENOMEM; goto error_pre_alloc;
}
/* Prepare transfer descriptors */
info.v_addr = buf;
info.cb_buf = buf;
info.len = len;
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); if (ret) {
kfree(buf); goto error_pre_alloc;
}
}
Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt.
Es wird jedoch weder Vollständigkeit noch Richtigkeit noch Qualität der bereitgestellten
Informationen zugesichert.
Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.