if (chan->cl->tx_prepare)
chan->cl->tx_prepare(chan->cl, data); /* Try to submit a message to the MBOX controller */
err = chan->mbox->ops->send_data(chan, data); if (!err) {
chan->active_req = data;
chan->msg_count--;
}
}
if (!err && (chan->txdone_method & TXDONE_BY_POLL)) { /* kick start the timer immediately to avoid delays */
scoped_guard(spinlock_irqsave, &chan->mbox->poll_hrt_lock)
hrtimer_start(&chan->mbox->poll_hrt, 0, HRTIMER_MODE_REL);
}
}
staticvoid tx_tick(struct mbox_chan *chan, int r)
{ void *mssg;
/**
 * mbox_chan_received_data - A way for controller driver to push data
 *				received from remote to the upper layer.
 * @chan: Pointer to the mailbox channel on which RX happened.
 * @mssg: Client specific message typecasted as void *
 *
 * After startup and before shutdown any data received on the chan
 * is passed on to the API via atomic mbox_chan_received_data().
 * The controller should ACK the RX only after this call returns.
 */
void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
{
	/*
	 * The data is not buffered by the framework: it goes straight to the
	 * client, or is dropped if the client registered no RX handler.
	 */
	if (!chan->cl->rx_callback)
		return;

	chan->cl->rx_callback(chan->cl, mssg);
}
EXPORT_SYMBOL_GPL(mbox_chan_received_data);
/**
 * mbox_chan_txdone - A way for controller driver to notify the
 *			framework that the last TX has completed.
 * @chan: Pointer to the mailbox chan on which TX happened.
 * @r: Status of last TX - OK or ERROR
 *
 * The controller that has IRQ for TX ACK calls this atomic API
 * to tick the TX state machine. It works only if txdone_irq
 * is set by the controller.
 */
void mbox_chan_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
		dev_err(chan->mbox->dev,
			"Controller can't run the TX ticker\n");
		return;
	}

	/*
	 * The original extraction dropped the tick call and the closing
	 * brace, leaving @r unused and the function unterminated; the
	 * kernel-doc contract above ("tick the TX state machine") requires
	 * forwarding the status to tx_tick().
	 */
	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_chan_txdone);
/**
 * mbox_client_txdone - The way for a client to run the TX state machine.
 * @chan: Mailbox channel assigned to this client.
 * @r: Success status of last transmission.
 *
 * The client/protocol had received some 'ACK' packet and it notifies
 * the API that the last packet was sent successfully. This only works
 * if the controller can't sense TX-Done.
 */
void mbox_client_txdone(struct mbox_chan *chan, int r)
{
	if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
		dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
		return;
	}

	/*
	 * Restored: the function body was truncated before the tick call,
	 * so @r was never consumed and the brace was left unclosed.
	 */
	tx_tick(chan, r);
}
EXPORT_SYMBOL_GPL(mbox_client_txdone);
/** * mbox_client_peek_data - A way for client driver to pull data * received from remote by the controller. * @chan: Mailbox channel assigned to this client. * * A poke to controller driver for any received data. * The data is actually passed onto client via the * mbox_chan_received_data() * The call can be made from atomic context, so the controller's * implementation of peek_data() must not sleep. * * Return: True, if controller has, and is going to push after this, * some data. * False, if controller doesn't have any data to be read.
*/ bool mbox_client_peek_data(struct mbox_chan *chan)
{ if (chan->mbox->ops->peek_data) return chan->mbox->ops->peek_data(chan);
/**
 * mbox_send_message -	For client to submit a message to be
 *			sent to the remote.
 * @chan: Mailbox channel assigned to this client.
 * @mssg: Client specific message typecasted.
 *
 * For client to submit data to the controller destined for a remote
 * processor. If the client had set 'tx_block', the call will return
 * either when the remote receives the data or when 'tx_tout' millisecs
 * run out.
 * In non-blocking mode, the requests are buffered by the API and a
 * non-negative token is returned for each queued request. If the request
 * is not queued, a negative token is returned. Upon failure or successful
 * TX, the API calls 'tx_done' from atomic context, from which the client
 * could submit yet another request.
 * The pointer to message should be preserved until it is sent
 * over the chan, i.e, tx_done() is made.
 * This function could be called from atomic context as it simply
 * queues the data and returns a token against the request.
 *
 * Return: Non-negative integer for successful submission (non-blocking mode)
 *	or transmission over chan (blocking mode).
 *	Negative value denotes failure.
 */
int mbox_send_message(struct mbox_chan *chan, void *mssg)
{
	int t;

	if (!chan || !chan->cl)
		return -EINVAL;

	t = add_to_rbuf(chan, mssg);
	if (t < 0) {
		dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
		return t;
	}

	msg_submit(chan);

	if (chan->cl->tx_block) {
		unsigned long wait;
		int ret;

		/*
		 * Fix: 'wait' was previously passed to
		 * wait_for_completion_timeout() uninitialized (undefined
		 * behavior). A client with tx_tout == 0 means "no limit";
		 * cap that at an arbitrarily long one hour rather than
		 * waiting with a garbage timeout.
		 */
		if (!chan->cl->tx_tout) /* wait forever */
			wait = msecs_to_jiffies(3600000);
		else
			wait = msecs_to_jiffies(chan->cl->tx_tout);

		ret = wait_for_completion_timeout(&chan->tx_complete, wait);
		if (ret == 0) {
			t = -ETIME;
			/* A timed-out blocking TX still ticks the state
			 * machine so the next queued request can proceed. */
			tx_tick(chan, t);
		}
	}

	return t;
}
EXPORT_SYMBOL_GPL(mbox_send_message);
/** * mbox_flush - flush a mailbox channel * @chan: mailbox channel to flush * @timeout: time, in milliseconds, to allow the flush operation to succeed * * Mailbox controllers that need to work in atomic context can implement the * ->flush() callback to busy loop until a transmission has been completed. * The implementation must call mbox_chan_txdone() upon success. Clients can * call the mbox_flush() function at any time after mbox_send_message() to * flush the transmission. After the function returns success, the mailbox * transmission is guaranteed to have completed. * * Returns: 0 on success or a negative error code on failure.
*/ int mbox_flush(struct mbox_chan *chan, unsignedlong timeout)
{ int ret;
if (!chan->mbox->ops->flush) return -ENOTSUPP;
ret = chan->mbox->ops->flush(chan, timeout); if (ret < 0)
tx_tick(chan, ret);
if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
chan->txdone_method = TXDONE_BY_ACK;
}
if (chan->mbox->ops->startup) {
ret = chan->mbox->ops->startup(chan);
if (ret) {
dev_err(dev, "Unable to startup the chan (%d)\n", ret);
mbox_free_channel(chan); return ret;
}
}
return 0;
}
/** * mbox_bind_client - Request a mailbox channel. * @chan: The mailbox channel to bind the client to. * @cl: Identity of the client requesting the channel. * * The Client specifies its requirements and capabilities while asking for * a mailbox channel. It can't be called from atomic context. * The channel is exclusively allocated and can't be used by another * client before the owner calls mbox_free_channel. * After assignment, any packet received on this channel will be * handed over to the client via the 'rx_callback'. * The framework holds reference to the client, so the mbox_client * structure shouldn't be modified until the mbox_free_channel returns. * * Return: 0 if the channel was assigned to the client successfully. * <0 for request failure.
*/ int mbox_bind_client(struct mbox_chan *chan, struct mbox_client *cl)
{
guard(mutex)(&con_mutex);
/** * mbox_request_channel - Request a mailbox channel. * @cl: Identity of the client requesting the channel. * @index: Index of mailbox specifier in 'mboxes' property. * * The Client specifies its requirements and capabilities while asking for * a mailbox channel. It can't be called from atomic context. * The channel is exclusively allocated and can't be used by another * client before the owner calls mbox_free_channel. * After assignment, any packet received on this channel will be * handed over to the client via the 'rx_callback'. * The framework holds reference to the client, so the mbox_client * structure shouldn't be modified until the mbox_free_channel returns. * * Return: Pointer to the channel assigned to the client if successful. * ERR_PTR for request failure.
*/ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
{ struct device *dev = cl->dev; struct mbox_controller *mbox; struct of_phandle_args spec; struct mbox_chan *chan; int ret;
if (!dev || !dev->of_node) {
pr_debug("%s: No owner device node\n", __func__); return ERR_PTR(-ENODEV);
}
ret = of_parse_phandle_with_args(dev->of_node, "mboxes", "#mbox-cells",
index, &spec); if (ret) {
dev_err(dev, "%s: can't parse \"mboxes\" property\n", __func__); return ERR_PTR(ret);
}
if (!np) {
dev_err(cl->dev, "%s() currently only supports DT\n", __func__); return ERR_PTR(-EINVAL);
}
index = of_property_match_string(np, "mbox-names", name); if (index < 0) {
dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
__func__, name); return ERR_PTR(index);
} return mbox_request_channel(cl, index);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
/**
 * mbox_free_channel - The client relinquishes control of a mailbox
 *			channel by this call.
 * @chan: The mailbox channel to be freed.
 */
void mbox_free_channel(struct mbox_chan *chan)
{
	if (!chan || !chan->cl)
		return;

	if (chan->mbox->ops->shutdown)
		chan->mbox->ops->shutdown(chan);

	/* The queued TX requests are simply aborted, no callbacks are made */
	scoped_guard(spinlock_irqsave, &chan->lock) {
		chan->cl = NULL;
		chan->active_req = NULL;

		/*
		 * A channel whose client drove TX-done by ACK falls back to
		 * polling once unbound, so the next owner starts from the
		 * controller's default method.
		 */
		if (chan->txdone_method == TXDONE_BY_ACK)
			chan->txdone_method = TXDONE_BY_POLL;
	}
}
/*
 * Fix: the closing brace was missing, so the next definition opened while
 * this function's scope was still unterminated. NOTE(review): the request
 * path is not fully visible here — confirm whether a module reference
 * (module_put) must also be dropped on free to balance the request side.
 */
EXPORT_SYMBOL_GPL(mbox_free_channel);
staticstruct mbox_chan *
of_mbox_index_xlate(struct mbox_controller *mbox, conststruct of_phandle_args *sp)
{ int ind = sp->args[0];
if (ind >= mbox->num_chans) return ERR_PTR(-EINVAL);
return &mbox->chans[ind];
}
/** * mbox_controller_register - Register the mailbox controller * @mbox: Pointer to the mailbox controller. * * The controller driver registers its communication channels
*/ int mbox_controller_register(struct mbox_controller *mbox)
{ int i, txdone;
if (mbox->txdone_irq)
txdone = TXDONE_BY_IRQ; elseif (mbox->txdone_poll)
txdone = TXDONE_BY_POLL; else/* It has to be ACK then */
txdone = TXDONE_BY_ACK;
if (txdone == TXDONE_BY_POLL) {
if (!mbox->ops->last_tx_done) {
dev_err(mbox->dev, "last_tx_done method is absent\n"); return -EINVAL;
}
/** * devm_mbox_controller_register() - managed mbox_controller_register() * @dev: device owning the mailbox controller being registered * @mbox: mailbox controller being registered * * This function adds a device-managed resource that will make sure that the * mailbox controller, which is registered using mbox_controller_register() * as part of this function, will be unregistered along with the rest of * device-managed resources upon driver probe failure or driver removal. * * Returns 0 on success or a negative error code on failure.
*/ int devm_mbox_controller_register(struct device *dev, struct mbox_controller *mbox)
{ struct mbox_controller **ptr; int err;
ptr = devres_alloc(__devm_mbox_controller_unregister, sizeof(*ptr),
GFP_KERNEL); if (!ptr) return -ENOMEM;
/*
 * NOTE(review): stray non-code text from the extraction source, preserved
 * below as a comment so the file is no longer broken by bare prose. It is a
 * German website disclaimer, unrelated to this driver ("The information on
 * this website was compiled carefully to the best of our knowledge; however
 * neither completeness, correctness nor quality is guaranteed. Note: the
 * colored syntax display and the measurement are still experimental."):
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereitgestellten
 * Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */