	for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
				   &hash_segment[i]);
		if (ret) {
			dev_err(dev, "Could not capture OEM PK HASH\n");
			return ret;
		}
	}

	for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++)
		cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
				     i, hash_segment[i]);
/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}
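/*
 * Illustrative sketch (not part of the driver): why alloc_size = len + (len - 1)
 * is always enough. In the worst case the returned handle sits one byte past
 * an aligned boundary, rounding up then wastes len - 1 bytes, and exactly len
 * bytes remain for the ring itself. This assumes len is a power of two, which
 * the mask arithmetic above relies on.
 */
static inline bool example_aligned_ring_fits(u64 dma_handle, u64 len)
{
	u64 aligned = (dma_handle + (len - 1)) & ~(len - 1);

	/* aligned - dma_handle <= len - 1, so len bytes always fit */
	return (aligned - dma_handle) + len <= len + (len - 1);
}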
static int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags, "bhi", mhi_cntrl);
	if (ret)
		return ret;

	/*
	 * IRQs should be enabled during mhi_async_power_up(), so disable them
	 * explicitly here. Due to the use of IRQF_SHARED flag as default while
	 * requesting IRQs, we assume that IRQ_NOAUTOEN is not applicable.
	 */
	disable_irq(mhi_cntrl->irq[0]);

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(mhi_cntrl->cntrl_dev,
				"irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags, "mhi", mhi_event);
		if (ret) {
			dev_err(mhi_cntrl->cntrl_dev,
				"Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}

		disable_irq(mhi_cntrl->irq[mhi_event->irq]);
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}
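/*
 * Illustrative sketch (hypothetical setup_one()/teardown_one() helpers, not
 * from this file): the error_request unwind above in miniature. On failure
 * at index i, walk back over [0, i) and undo only the entries that were
 * actually set up.
 */
int setup_one(int i);		/* hypothetical */
void teardown_one(int i);	/* hypothetical */

static int example_setup_all(int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = setup_one(i);
		if (ret)
			goto unwind;
	}
	return 0;

unwind:
	while (--i >= 0)
		teardown_one(i);
	return ret;
}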
	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}
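/*
 * Illustrative sketch (hypothetical helpers): with the "rp == wp means empty"
 * convention used above, fullness must be detected one element early,
 * otherwise a completely full ring would be indistinguishable from an empty
 * one. A ring of N elements therefore holds at most N - 1 at a time.
 */
static inline bool example_ring_empty(void *rp, void *wp)
{
	return rp == wp;
}

static inline bool example_ring_full(void *base, void *rp, void *wp,
				     u64 len, u64 el_size)
{
	void *next_wp = wp + el_size;

	if (next_wp >= base + len)
		next_wp = base;	/* wrap around to the start of the ring */

	return next_wp == rp;
}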
	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;
	/* Read channel db offset */
	ret = mhi_get_channel_doorbell_offset(mhi_cntrl, &val);
	if (ret)
		return ret;

	if (val >= mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB)) {
		dev_err(dev, "CHDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * MHI_DEV_WAKE_DB));
		return -ERANGE;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;
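/*
 * Illustrative sketch (hypothetical helper): doorbell registers occupy one
 * 8-byte slot per channel, which is why the loop above advances val by 8 and
 * why the wake doorbell sits at the channel doorbell base plus
 * 8 * MHI_DEV_WAKE_DB.
 */
static inline void __iomem *example_chan_db_addr(void __iomem *base,
						 u32 chdb_off, u32 chan)
{
	return base + chdb_off + (8 * chan);
}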
	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	if (val >= mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings)) {
		dev_err(dev, "ERDB offset: 0x%x is out of range: 0x%zx\n",
			val, mhi_cntrl->reg_len - (8 * mhi_cntrl->total_ev_rings));
		return -ERANGE;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg(mhi_cntrl, base, reg_info[i].offset,
			      reg_info[i].val);
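/*
 * Illustrative sketch (hypothetical struct name and register values): the
 * write loop above stops at the first entry whose offset is 0, so the
 * reg_info table must end with a zeroed sentinel entry, e.g.:
 */
struct example_reg_write {
	u32 offset;
	u32 val;
};

static const struct example_reg_write example_reg_info[] = {
	{ .offset = 0x100, .val = 0x1 },
	{ .offset = 0x108, .val = 0x0 },
	{ 0, 0 },	/* sentinel: offset 0 terminates the walk */
};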
	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NER_MASK,
				  mhi_cntrl->total_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}

	ret = mhi_write_reg_field(mhi_cntrl, base, MHICFG, MHICFG_NHWER_MASK,
				  mhi_cntrl->hw_ev_rings);
	if (ret) {
		dev_err(dev, "Unable to write MHICFG register\n");
		return ret;
	}
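/*
 * Illustrative sketch (hypothetical helper): a register field update of the
 * kind mhi_write_reg_field() performs is a read-modify-write -- clear the
 * field's bits, then OR in the new value shifted to the field's position.
 */
static inline u32 example_field_update(u32 regval, u32 mask, u32 field_val)
{
	u32 shift = __ffs(mask);	/* bit position of the field's LSB */

	return (regval & ~mask) | ((field_val << shift) & mask);
}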
		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev, "Event Ring channel not available\n");
				goto error_ev_cfg;
			}
		}

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;
	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so vcalloc() is used here to avoid possible failures of large,
	 * physically contiguous allocations.
	 */
	mhi_cntrl->mhi_chan = vcalloc(mhi_cntrl->max_chan, sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);
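/*
 * Illustrative sketch (hypothetical helper): the trade-off behind vcalloc()
 * above. kcalloc() needs physically contiguous pages and may fail for large
 * allocations under memory fragmentation; vcalloc() only needs virtually
 * contiguous pages, which suffices here since the array is never DMA-mapped.
 */
static void *example_alloc_chan_array(size_t nmemb, size_t size)
{
	return vcalloc(nmemb, size);	/* zeroed, virtually contiguous */
}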
	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;
		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so that the host can queue more
		 * buffers than the transfer ring length allows. For example,
		 * RSC channels should have a larger local channel length than
		 * transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to channel directions.
		 * So, if it is not defined then assign channel direction to
		 * chtype.
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;
		/*
		 * If the MHI host allocates buffers, then the channel direction
		 * should be DMA_FROM_DEVICE.
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}
		/*
		 * Bi-directional and directionless channels must be offload
		 * channels.
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}
		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid doorbell mode\n");
				goto error_chan_cfg;
			}
		}
	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;
	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}
	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;
	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	ret = mhi_init_irq_setup(mhi_cntrl);
	if (ret)
		goto err_ida_free;
	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto error_setup_irq;
	}
	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -ERANGE;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;
	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size || mhi_cntrl->seg_len) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev, "BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -ERANGE;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}
	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS, 0,
			  BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS + 4);

		/* Allocate RDDM table for debugging purposes, if specified */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image) {
			ret = mhi_rddm_prepare(mhi_cntrl,
					       mhi_cntrl->rddm_image);
			if (ret) {
				mhi_free_bhie_table(mhi_cntrl,
						    mhi_cntrl->rddm_image);
				goto error_reg_offset;
			}
		}
	}
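/*
 * Illustrative sketch (hypothetical helper): the size passed to memset_io()
 * above spans every BHIE RX vector register, from RXVECADDR_LOW up to and
 * including the 4-byte RXVECSTATUS register.
 */
static inline size_t example_reg_window_size(u32 first_off, u32 last_off)
{
	return (last_off - first_off) + 4;	/* inclusive of the last register */
}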
	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with it is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;
	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;
	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. PCI device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}
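/*
 * Illustrative sketch (hypothetical helper): once device_initialize() has
 * been called as above, the object is refcounted and must be released via
 * put_device(), which invokes dev->release -- never via kfree() directly,
 * even on an error path.
 */
static int example_register(struct device *dev)
{
	int ret = device_add(dev);

	if (ret)
		put_device(dev);	/* release callback frees the object */

	return ret;
}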
	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided.
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}
	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications then status_cb
		 * should be provided.
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		/*
		 * If the channel event ring is managed by the client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data.
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}
	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;
	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Mark the channel suspended while it is being reset */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}
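/*
 * Illustrative sketch (hypothetical helper): the locking pattern above in
 * isolation -- the mutex serializes the slow teardown path, while the rwlock
 * protects fields that are also read in IRQ context, so the state flip
 * itself happens under write_lock_irq() inside the mutex.
 */
static void example_set_chan_state(struct mhi_chan *mhi_chan,
				   enum mhi_ch_state state)
{
	write_lock_irq(&mhi_chan->lock);
	mhi_chan->ch_state = state;
	write_unlock_irq(&mhi_chan->lock);
}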
	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it.
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}
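/*
 * Illustrative sketch (hypothetical client driver, assumed channel name):
 * the id_table walk above matches mhi_dev->name against id->chan, and the
 * probe-time checks earlier require xfer_cb for non-offload channels. A
 * minimal client satisfying both might look like this:
 */
static int example_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	return 0;
}

static void example_remove(struct mhi_device *mhi_dev)
{
}

static void example_ul_xfer_cb(struct mhi_device *mhi_dev,
			       struct mhi_result *result)
{
}

static void example_dl_xfer_cb(struct mhi_device *mhi_dev,
			       struct mhi_result *result)
{
}

static const struct mhi_device_id example_id_table[] = {
	{ .chan = "EXAMPLE" },	/* hypothetical channel name */
	{},
};

static struct mhi_driver example_mhi_driver = {
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
	.ul_xfer_cb = example_ul_xfer_cb,
	.dl_xfer_cb = example_dl_xfer_cb,
	.driver = {
		.name = "example_mhi_client",
	},
};
module_mhi_driver(example_mhi_driver);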