/**
 * struct acpm_chan_shmem - descriptor of a shared memory channel.
 *
 * @id:		channel ID.
 * @reserved:	unused fields.
 * @rx_rear:	rear pointer of APM RX queue (TX for AP).
 * @rx_front:	front pointer of APM RX queue (TX for AP).
 * @rx_base:	base address of APM RX queue (TX for AP).
 * @reserved1:	unused fields.
 * @tx_rear:	rear pointer of APM TX queue (RX for AP).
 * @tx_front:	front pointer of APM TX queue (RX for AP).
 * @tx_base:	base address of APM TX queue (RX for AP).
 * @qlen:	queue length. Applies to both TX/RX queues.
 * @mlen:	message length. Applies to both TX/RX queues.
 * @reserved2:	unused fields.
 * @poll_completion: true when the channel works on polling.
 *
 * NOTE(review): this mirrors a firmware-defined layout in SRAM — field order,
 * widths and the reserved padding must not be changed.
 */
struct acpm_chan_shmem {
	u32 id;
	u32 reserved[3];
	u32 rx_rear;
	u32 rx_front;
	u32 rx_base;
	u32 reserved1[3];
	u32 tx_rear;
	u32 tx_front;
	u32 tx_base;
	u32 qlen;
	u32 mlen;
	u32 reserved2[2];
	u32 poll_completion;
};
/**
 * struct acpm_queue - exynos acpm queue.
 *
 * @rear:	rear address of the queue.
 * @front:	front address of the queue.
 * @base:	base address of the queue.
 *
 * All three are __iomem pointers into the shared SRAM region where the
 * queue's indices and storage live.
 */
struct acpm_queue {
	void __iomem *rear;
	void __iomem *front;
	void __iomem *base;
};
/**
 * struct acpm_rx_data - RX queue data.
 *
 * @cmd:	pointer to where the data shall be saved.
 * @n_cmd:	number of 32-bit commands.
 * @response:	true if the client expects the RX data.
 */
struct acpm_rx_data {
	u32 *cmd;
	size_t n_cmd;
	bool response;
};
#define ACPM_SEQNUM_MAX 64
/** * struct acpm_chan - driver internal representation of a channel. * @cl: mailbox client. * @chan: mailbox channel. * @acpm: pointer to driver private data. * @tx: TX queue. The enqueue is done by the host. * - front index is written by the host. * - rear index is written by the firmware. * * @rx: RX queue. The enqueue is done by the firmware. * - front index is written by the firmware. * - rear index is written by the host. * @tx_lock: protects TX queue. * @rx_lock: protects RX queue. * @qlen: queue length. Applies to both TX/RX queues. * @mlen: message length. Applies to both TX/RX queues. * @seqnum: sequence number of the last message enqueued on TX queue. * @id: channel ID. * @poll_completion: indicates if the transfer needs to be polled for * completion or interrupt mode is used. * @bitmap_seqnum: bitmap that tracks the messages on the TX/RX queues. * @rx_data: internal buffer used to drain the RX queue.
*/ struct acpm_chan { struct mbox_client cl; struct mbox_chan *chan; struct acpm_info *acpm; struct acpm_queue tx; struct acpm_queue rx; struct mutex tx_lock; struct mutex rx_lock;
/**
 * struct acpm_info - driver's private data.
 * @shmem:	pointer to the SRAM configuration data.
 * @sram_base:	base address of SRAM.
 * @chans:	pointer to the ACPM channel parameters retrieved from SRAM.
 * @dev:	pointer to the exynos-acpm device.
 * @handle:	instance of acpm_handle to send to clients.
 * @num_chans:	number of channels available for this controller.
 */
struct acpm_info {
	struct acpm_shmem __iomem *shmem;
	void __iomem *sram_base;
	struct acpm_chan *chans;
	struct device *dev;
	struct acpm_handle handle;
	u32 num_chans;
};
/**
 * struct acpm_match_data - of_device_id data.
 * @initdata_base:	offset in SRAM where the channels configuration resides.
 */
struct acpm_match_data {
	loff_t initdata_base;
};
if (i == rx_front) {
acpm_get_saved_rx(achan, xfer, tx_seqnum); return 0;
}
base = achan->rx.base;
mlen = achan->mlen;
/* Drain RX queue. */ do { /* Read RX seqnum. */
addr = base + mlen * i;
val = readl(addr);
rx_seqnum = FIELD_GET(ACPM_PROTOCOL_SEQNUM, val); if (!rx_seqnum) return -EIO; /* * mssg seqnum starts with value 1, whereas the driver considers * the first mssg at index 0.
*/
seqnum = rx_seqnum - 1;
rx_data = &achan->rx_data[seqnum];
if (rx_data->response) { if (rx_seqnum == tx_seqnum) {
__ioread32_copy(xfer->rxd, addr,
xfer->rxlen / 4);
rx_set = true;
clear_bit(seqnum, achan->bitmap_seqnum);
} else { /* * The RX data corresponds to another request. * Save the data to drain the queue, but don't * clear yet the bitmap. It will be cleared * after the response is copied to the request.
*/
__ioread32_copy(rx_data->cmd, addr,
xfer->rxlen / 4);
}
} else {
clear_bit(seqnum, achan->bitmap_seqnum);
}
i = (i + 1) % achan->qlen;
} while (i != rx_front);
/* We saved all responses, mark RX empty. */
writel(rx_front, achan->rx.rear);
/* * If the response was not in this iteration of the queue, check if the * RX data was previously saved.
*/ if (!rx_set)
acpm_get_saved_rx(achan, xfer, tx_seqnum);
return 0;
}
/** * acpm_dequeue_by_polling() - RX dequeue by polling. * @achan: ACPM channel info. * @xfer: reference to the transfer being waited for. * * Return: 0 on success, -errno otherwise.
*/ staticint acpm_dequeue_by_polling(struct acpm_chan *achan, conststruct acpm_xfer *xfer)
{ struct device *dev = achan->acpm->dev;
ktime_t timeout;
u32 seqnum; int ret;
/**
 * acpm_wait_for_queue_slots() - wait for queue slots.
 *
 * @achan:		ACPM channel info.
 * @next_tx_front:	next front index of the TX queue.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_queue_slots(struct acpm_chan *achan, u32 next_tx_front)
{
	u32 val;
	int ret;

	/*
	 * Wait for RX front to keep up with TX front. Make sure there's at
	 * least one element between them.
	 */
	ret = readl_poll_timeout(achan->rx.front, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret) {
		dev_err(achan->acpm->dev, "RX front can not keep up with TX front.\n");
		return ret;
	}

	/* Wait until the firmware consumed at least one TX slot. */
	ret = readl_poll_timeout(achan->tx.rear, val, next_tx_front != val, 0,
				 ACPM_TX_TIMEOUT_US);
	if (ret)
		dev_err(achan->acpm->dev, "TX queue is full.\n");

	/*
	 * Fix: ret was previously declared u32, but readl_poll_timeout()
	 * yields 0 or -ETIMEDOUT and this function returns int; keep the
	 * negative errno in a signed variable.
	 */
	return ret;
}
/** * acpm_prepare_xfer() - prepare a transfer before writing the message to the * TX queue. * @achan: ACPM channel info. * @xfer: reference to the transfer being prepared.
*/ staticvoid acpm_prepare_xfer(struct acpm_chan *achan, conststruct acpm_xfer *xfer)
{ struct acpm_rx_data *rx_data;
u32 *txd = (u32 *)xfer->txd;
/* Prevent chan->seqnum from being re-used */ do { if (++achan->seqnum == ACPM_SEQNUM_MAX)
achan->seqnum = 1;
} while (test_bit(achan->seqnum - 1, achan->bitmap_seqnum));
/* Clear data for upcoming responses */
rx_data = &achan->rx_data[achan->seqnum - 1];
memset(rx_data->cmd, 0, sizeof(*rx_data->cmd) * rx_data->n_cmd); if (xfer->rxd)
rx_data->response = true;
/* Flag the index based on seqnum. (seqnum: 1~63, bitmap: 0~62) */
set_bit(achan->seqnum - 1, achan->bitmap_seqnum);
}
/**
 * acpm_wait_for_message_response - an helper to group all possible ways of
 * waiting for a synchronous message response.
 *
 * @achan:	ACPM channel info.
 * @xfer:	reference to the transfer being waited for.
 *
 * Return: 0 on success, -errno otherwise.
 */
static int acpm_wait_for_message_response(struct acpm_chan *achan,
					  const struct acpm_xfer *xfer)
{
	/* Just polling mode supported for now. */
	return acpm_dequeue_by_polling(achan, xfer);
}
/** * acpm_do_xfer() - do one transfer. * @handle: pointer to the acpm handle. * @xfer: transfer to initiate and wait for response. * * Return: 0 on success, -errno otherwise.
*/ int acpm_do_xfer(conststruct acpm_handle *handle, conststruct acpm_xfer *xfer)
{ struct acpm_info *acpm = handle_to_acpm_info(handle); struct exynos_mbox_msg msg; struct acpm_chan *achan;
u32 idx, tx_front; int ret;
if (xfer->acpm_chan_id >= acpm->num_chans) return -EINVAL;
/** * acpm_get_by_node() - get the ACPM handle using node pointer. * @dev: device pointer requesting ACPM handle. * @np: ACPM device tree node. * * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/ staticconststruct acpm_handle *acpm_get_by_node(struct device *dev, struct device_node *np)
{ struct platform_device *pdev; struct device_link *link; struct acpm_info *acpm;
pdev = of_find_device_by_node(np); if (!pdev) return ERR_PTR(-EPROBE_DEFER);
acpm = platform_get_drvdata(pdev); if (!acpm) {
platform_device_put(pdev); return ERR_PTR(-EPROBE_DEFER);
}
if (!try_module_get(pdev->dev.driver->owner)) {
platform_device_put(pdev); return ERR_PTR(-EPROBE_DEFER);
}
link = device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER); if (!link) {
dev_err(&pdev->dev, "Failed to create device link to consumer %s.\n",
dev_name(dev));
platform_device_put(pdev);
module_put(pdev->dev.driver->owner); return ERR_PTR(-EINVAL);
}
return &acpm->handle;
}
/** * devm_acpm_get_by_node() - managed get handle using node pointer. * @dev: device pointer requesting ACPM handle. * @np: ACPM device tree node. * * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
*/ conststruct acpm_handle *devm_acpm_get_by_node(struct device *dev, struct device_node *np)
{ conststruct acpm_handle **ptr, *handle;
ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.