// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */
/*
 * The Qualcomm Shared Memory communication solution provides point-to-point
 * channels for clients to send and receive streaming or packet based data.
 *
 * Each channel consists of a control item (channel info) and a ring buffer
 * pair. The channel info carries information related to channel state, flow
 * control and the offsets within the ring buffer.
 *
 * All allocated channels are listed in an allocation table, identifying the
 * pair of items by name, type and remote processor.
 *
 * Upon creating a new channel the remote processor allocates channel info and
 * ring buffer items from the smem heap and populates the allocation table. An
 * interrupt is sent to the other end of the channel and a scan for new
 * channels should be done. A channel never goes away, it will only change
 * state.
 *
 * The remote processor signals its intent to bring up the communication
 * channel by setting the state of its end of the channel to "opening" and
 * sends out an interrupt. We detect this change and register a smd device to
 * consume the channel. Upon finding a consumer we finish the handshake and the
 * channel is up.
 *
 * Upon closing a channel, the remote processor will update the state of its
 * end of the channel and signal us, we will then unregister any attached
 * device and close our end of the channel.
 *
 * Devices attached to a channel can use the qcom_smd_send function to push
 * data to the channel, this is done by copying the data into the tx ring
 * buffer, updating the pointers in the channel info and signaling the remote
 * processor.
 *
 * The remote processor does the equivalent when it transfers data and upon
 * receiving the interrupt we check the channel info for new data and deliver
 * it to the attached device. If the device is not ready to receive the data
 * we leave it in the ring buffer for now.
 */
/*
 * This lists the various smem heap items relevant for the allocation table and
 * smd channel entries.
 *
 * Each entry names the smem item ids for one allocation table and the base
 * ids from which the per-channel info and fifo items are offset.
 */
static const struct {
	unsigned alloc_tbl_id;	/* smem item id of the allocation table */
	unsigned info_base_id;	/* base smem item id for channel info items */
	unsigned fifo_base_id;	/* base smem item id for channel fifo items */
} smem_items[SMD_ALLOC_TBL_COUNT] = {
	{
		.alloc_tbl_id = 13,
		.info_base_id = 14,
		.fifo_base_id = 338,
	},
	{
		.alloc_tbl_id = 266,
		.info_base_id = 138,
		.fifo_base_id = 202,
	},
};
/** * struct qcom_smd_edge - representing a remote processor * @dev: device associated with this edge * @name: name of this edge * @of_node: of_node handle for information related to this edge * @edge_id: identifier of this edge * @remote_pid: identifier of remote processor * @irq: interrupt for signals on this edge * @ipc_regmap: regmap handle holding the outgoing ipc register * @ipc_offset: offset within @ipc_regmap of the register for ipc * @ipc_bit: bit in the register at @ipc_offset of @ipc_regmap * @mbox_client: mailbox client handle * @mbox_chan: apcs ipc mailbox channel handle * @channels: list of all channels detected on this edge * @channels_lock: guard for modifications of @channels * @allocated: array of bitmaps representing already allocated channels * @smem_available: last available amount of smem triggering a channel scan * @new_channel_event: wait queue for new channel events * @scan_work: work item for discovering new channels * @state_work: work item for edge state changes
*/ struct qcom_smd_edge { struct device dev;
/** * struct qcom_smd_channel - smd channel struct * @edge: qcom_smd_edge this channel is living on * @qsept: reference to a associated smd endpoint * @registered: flag to indicate if the channel is registered * @name: name of the channel * @state: local state of the channel * @remote_state: remote state of the channel * @state_change_event: state change event * @info: byte aligned outgoing/incoming channel info * @info_word: word aligned outgoing/incoming channel info * @tx_lock: lock to make writes to the channel mutually exclusive * @fblockread_event: wakeup event tied to tx fBLOCKREADINTR * @tx_fifo: pointer to the outgoing ring buffer * @rx_fifo: pointer to the incoming ring buffer * @fifo_size: size of each ring buffer * @bounce_buffer: bounce buffer for reading wrapped packets * @cb: callback function registered for this channel * @recv_lock: guard for rx info modifications and cb pointer * @pkt_size: size of the currently handled packet * @drvdata: driver private data * @list: lite entry for @channels in qcom_smd_edge
*/ struct qcom_smd_channel { struct qcom_smd_edge *edge;
/*
 * Each smd packet contains a 20 byte header, with the first 4 being the length
 * of the packet.
 */
#define SMD_PACKET_HEADER_LEN	20
/*
 * Signal the remote processor associated with 'channel', either via the
 * mailbox channel or, when no mailbox is available, by writing the edge's
 * ipc bit directly through the syscon regmap.
 */
static void qcom_smd_signal_channel(struct qcom_smd_channel *channel)
{
	struct qcom_smd_edge *edge = channel->edge;

	if (edge->mbox_chan) {
		/*
		 * We can ignore a failing mbox_send_message() as the only
		 * possible cause is that the FIFO in the framework is full of
		 * other writes to the same bit.
		 */
		mbox_send_message(edge->mbox_chan, NULL);
		mbox_client_txdone(edge->mbox_chan, 0);
	} else {
		regmap_write(edge->ipc_regmap, edge->ipc_offset,
			     BIT(edge->ipc_bit));
	}
}
/* * Calculate the amount of data available in the rx fifo
*/ static size_t qcom_smd_channel_get_rx_avail(struct qcom_smd_channel *channel)
{ unsigned head; unsigned tail;
head = GET_RX_CHANNEL_INFO(channel, head);
tail = GET_RX_CHANNEL_INFO(channel, tail);
/* * Set tx channel state and inform the remote processor
*/ staticvoid qcom_smd_channel_set_state(struct qcom_smd_channel *channel, int state)
{ struct qcom_smd_edge *edge = channel->edge; bool is_open = state == SMD_CHANNEL_OPENED;
/*
 * Read out a single packet from the rx fifo and deliver it to the device.
 *
 * Returns 0 on success, or the (negative) error returned by the endpoint
 * callback, in which case the fifo tail is left untouched so the packet can
 * be delivered again later.
 */
static int qcom_smd_channel_recv_single(struct qcom_smd_channel *channel)
{
	struct rpmsg_endpoint *ept = &channel->qsept->ept;
	unsigned tail;
	size_t len;
	void *ptr;
	int ret;

	tail = GET_RX_CHANNEL_INFO(channel, tail);

	/* Use bounce buffer if the data wraps */
	if (tail + channel->pkt_size >= channel->fifo_size) {
		ptr = channel->bounce_buffer;
		len = qcom_smd_channel_peek(channel, ptr, channel->pkt_size);
	} else {
		/* Contiguous packet; hand the client a pointer into the fifo */
		ptr = channel->rx_fifo + tail;
		len = channel->pkt_size;
	}

	ret = ept->cb(ept->rpdev, ptr, len, ept->priv, RPMSG_ADDR_ANY);
	if (ret < 0)
		return ret;

	/* Only forward the tail if the client consumed the data */
	qcom_smd_channel_advance(channel, len);

	channel->pkt_size = 0;

	return 0;
}
/* * Per channel interrupt handling
*/ staticbool qcom_smd_channel_intr(struct qcom_smd_channel *channel)
{ bool need_state_scan = false; int remote_state;
__le32 pktlen; int avail; int ret;
/* Handle state changes */
remote_state = GET_RX_CHANNEL_INFO(channel, state); if (remote_state != channel->remote_state) {
channel->remote_state = remote_state;
need_state_scan = true;
wake_up_interruptible_all(&channel->state_change_event);
} /* Indicate that we have seen any state change */
SET_RX_CHANNEL_FLAG(channel, fSTATE, 0);
/* Signal waiting qcom_smd_send() about the interrupt */ if (!GET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR))
wake_up_interruptible_all(&channel->fblockread_event);
/* Don't consume any data until we've opened the channel */ if (channel->state != SMD_CHANNEL_OPENED) goto out;
/* Indicate that we've seen the new data */
SET_RX_CHANNEL_FLAG(channel, fHEAD, 0);
/* Consume data */ for (;;) {
avail = qcom_smd_channel_get_rx_avail(channel);
/* Indicate that we have seen and updated tail */
SET_RX_CHANNEL_FLAG(channel, fTAIL, 1);
/* Signal the remote that we've consumed the data (if requested) */ if (!GET_RX_CHANNEL_FLAG(channel, fBLOCKREADINTR)) { /* Ensure ordering of channel info updates */
wmb();
qcom_smd_signal_channel(channel);
}
out: return need_state_scan;
}
/*
 * The edge interrupts are triggered by the remote processor on state changes,
 * channel info updates or when new channels are created.
 */
static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
{
	struct qcom_smd_edge *edge = data;
	struct qcom_smd_channel *channel;
	unsigned available;
	bool kick_scanner = false;
	bool kick_state = false;

	/*
	 * Handle state changes or data on each of the channels on this edge
	 */
	spin_lock(&edge->channels_lock);
	list_for_each_entry(channel, &edge->channels, list) {
		spin_lock(&channel->recv_lock);
		kick_state |= qcom_smd_channel_intr(channel);
		spin_unlock(&channel->recv_lock);
	}
	spin_unlock(&edge->channels_lock);

	/*
	 * Creating a new channel requires allocating an smem entry, so we only
	 * have to scan if the amount of available space in smem has changed
	 * since the last scan.
	 */
	available = qcom_smem_get_free_space(edge->remote_pid);
	if (available != edge->smem_available) {
		edge->smem_available = available;
		kick_scanner = true;
	}

	if (kick_scanner)
		schedule_work(&edge->scan_work);
	if (kick_state)
		schedule_work(&edge->state_work);

	return IRQ_HANDLED;
}
/*
 * Calculate how much space is available in the tx fifo.
 */
static size_t qcom_smd_get_tx_avail(struct qcom_smd_channel *channel)
{
	unsigned mask = channel->fifo_size - 1;
	unsigned head = GET_TX_CHANNEL_INFO(channel, head);
	unsigned tail = GET_TX_CHANNEL_INFO(channel, tail);

	/*
	 * (head - tail) & mask is the number of bytes in flight; at most
	 * fifo_size - 1 bytes are ever reported free, so head == tail
	 * unambiguously means "empty".
	 */
	return mask - ((head - tail) & mask);
}
/*
 * Write count bytes of data into channel, possibly wrapping in the ring
 * buffer. The caller must have verified that at least count bytes are free;
 * the tx head is published via SET_TX_CHANNEL_INFO when the copy is done.
 *
 * Returns count.
 */
static int qcom_smd_write_fifo(struct qcom_smd_channel *channel,
			       const void *data,
			       size_t count)
{
	bool word_aligned;
	unsigned head;
	size_t len;

	word_aligned = channel->info_word;
	head = GET_TX_CHANNEL_INFO(channel, head);

	/* First copy: from head up to the end of the fifo */
	len = min_t(size_t, count, channel->fifo_size - head);
	if (len) {
		smd_copy_to_fifo(channel->tx_fifo + head,
				 data,
				 len,
				 word_aligned);
	}

	/* Second copy: wrap the remainder around to the start of the fifo */
	if (len != count) {
		smd_copy_to_fifo(channel->tx_fifo,
				 data + len,
				 count - len,
				 word_aligned);
	}

	head += count;
	head &= (channel->fifo_size - 1);
	SET_TX_CHANNEL_INFO(channel, head, head);

	return count;
}
/** * __qcom_smd_send - write data to smd channel * @channel: channel handle * @data: buffer of data to write * @len: number of bytes to write * @wait: flag to indicate if write can wait * * This is a blocking write of len bytes into the channel's tx ring buffer and * signal the remote end. It will sleep until there is enough space available * in the tx buffer, utilizing the fBLOCKREADINTR signaling mechanism to avoid * polling.
*/ staticint __qcom_smd_send(struct qcom_smd_channel *channel, constvoid *data, int len, bool wait)
{
__le32 hdr[5] = { cpu_to_le32(len), }; int tlen = sizeof(hdr) + len; unsignedlong flags; int ret = 0;
/* Word aligned channels only accept word size aligned data */ if (channel->info_word && len % 4) return -EINVAL;
/* Reject packets that are too big */ if (tlen >= channel->fifo_size) return -EINVAL;
/* Highlight the fact that if we enter the loop below we might sleep */ if (wait)
might_sleep();
spin_lock_irqsave(&channel->tx_lock, flags);
while (qcom_smd_get_tx_avail(channel) < tlen &&
channel->state == SMD_CHANNEL_OPENED) { if (!wait) {
ret = -EAGAIN; goto out_unlock;
}
SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 0);
/* Wait without holding the tx_lock */
spin_unlock_irqrestore(&channel->tx_lock, flags);
ret = wait_event_interruptible(channel->fblockread_event,
qcom_smd_get_tx_avail(channel) >= tlen ||
channel->state != SMD_CHANNEL_OPENED); if (ret) return ret;
/* * Helper for opening a channel
*/ staticint qcom_smd_channel_open(struct qcom_smd_channel *channel,
rpmsg_rx_cb_t cb)
{ struct qcom_smd_edge *edge = channel->edge;
size_t bb_size; int ret;
/* * Packets are maximum 4k, but reduce if the fifo is smaller
*/
bb_size = min(channel->fifo_size, SZ_4K);
channel->bounce_buffer = kmalloc(bb_size, GFP_KERNEL); if (!channel->bounce_buffer) return -ENOMEM;
/* Wait for remote to enter opening or opened */
ret = wait_event_interruptible_timeout(channel->state_change_event,
channel->remote_state == SMD_CHANNEL_OPENING ||
channel->remote_state == SMD_CHANNEL_OPENED,
HZ); if (!ret) {
dev_err(&edge->dev, "remote side did not enter opening state\n"); goto out_close_timeout;
}
/* Wait for remote to enter opened */
ret = wait_event_interruptible_timeout(channel->state_change_event,
channel->remote_state == SMD_CHANNEL_OPENED,
HZ); if (!ret) {
dev_err(&edge->dev, "remote side did not enter open state\n"); goto out_close_timeout;
}
/* Wait up to HZ for the channel to appear */
ret = wait_event_interruptible_timeout(edge->new_channel_event,
(channel = qcom_smd_find_channel(edge, name)) != NULL,
HZ); if (!ret) return NULL;
if (channel->state != SMD_CHANNEL_CLOSED) {
dev_err(&rpdev->dev, "channel %s is busy\n", channel->name); return NULL;
}
qsept = kzalloc(sizeof(*qsept), GFP_KERNEL); if (!qsept) return NULL;
qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); if (!qsdev) return -ENOMEM;
/* Link qsdev to our SMD edge */
qsdev->edge = edge;
/* Assign callbacks for rpmsg_device */
qsdev->rpdev.ops = &qcom_smd_device_ops;
/* Assign public information to the rpmsg_device */
rpdev = &qsdev->rpdev;
strscpy_pad(rpdev->id.name, channel->name, RPMSG_NAME_SIZE);
rpdev->src = RPMSG_ADDR_ANY;
rpdev->dst = RPMSG_ADDR_ANY;
info = qcom_smem_get(edge->remote_pid, smem_info_item, &info_size); if (IS_ERR(info)) {
ret = PTR_ERR(info); goto free_name_and_channel;
}
/* * Use the size of the item to figure out which channel info struct to * use.
*/ if (info_size == 2 * sizeof(struct smd_channel_info_word)) {
channel->info_word = info;
} elseif (info_size == 2 * sizeof(struct smd_channel_info)) {
channel->info = info;
} else {
dev_err(&edge->dev, "channel info of size %zu not supported\n", info_size);
ret = -EINVAL; goto free_name_and_channel;
}
fifo_base = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_size); if (IS_ERR(fifo_base)) {
ret = PTR_ERR(fifo_base); goto free_name_and_channel;
}
/* The channel consist of a rx and tx fifo of equal size */
fifo_size /= 2;
/* * Scans the allocation table for any newly allocated channels, calls * qcom_smd_create_channel() to create representations of these and add * them to the edge's list of channels.
*/ staticvoid qcom_channel_scan_worker(struct work_struct *work)
{ struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge, scan_work); struct qcom_smd_alloc_entry *alloc_tbl; struct qcom_smd_alloc_entry *entry; struct qcom_smd_channel *channel; unsignedlong flags; unsigned fifo_id; unsigned info_id; int tbl; int i;
u32 eflags, cid;
for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
alloc_tbl = qcom_smem_get(edge->remote_pid,
smem_items[tbl].alloc_tbl_id, NULL); if (IS_ERR(alloc_tbl)) continue;
for (i = 0; i < SMD_ALLOC_TBL_SIZE; i++) {
entry = &alloc_tbl[i];
eflags = le32_to_cpu(entry->flags); if (test_bit(i, edge->allocated[tbl])) continue;
if (entry->ref_count == 0) continue;
if (!entry->name[0]) continue;
if (!(eflags & SMD_CHANNEL_FLAGS_PACKET)) continue;
if ((eflags & SMD_CHANNEL_FLAGS_EDGE_MASK) != edge->edge_id) continue;
/* * This per edge worker scans smem for any new channels and register these. It * then scans all registered channels for state changes that should be handled * by creating or destroying smd client devices for the registered channels. * * LOCKING: edge->channels_lock only needs to cover the list operations, as the * worker is killed before any channels are deallocated
*/ staticvoid qcom_channel_state_worker(struct work_struct *work)
{ struct qcom_smd_channel *channel; struct qcom_smd_edge *edge = container_of(work, struct qcom_smd_edge,
state_work); struct rpmsg_channel_info chinfo; unsigned remote_state; unsignedlong flags;
/* * Register a device for any closed channel where the remote processor * is showing interest in opening the channel.
*/
spin_lock_irqsave(&edge->channels_lock, flags);
list_for_each_entry(channel, &edge->channels, list) { if (channel->state != SMD_CHANNEL_CLOSED) continue;
/* * Always open rpm_requests, even when already opened which is * required on some SoCs like msm8953.
*/
remote_state = GET_RX_CHANNEL_INFO(channel, state); if (remote_state != SMD_CHANNEL_OPENING &&
remote_state != SMD_CHANNEL_OPENED &&
strcmp(channel->name, "rpm_requests")) continue;
/* * Unregister the device for any channel that is opened where the * remote processor is closing the channel.
*/
list_for_each_entry(channel, &edge->channels, list) { if (channel->state != SMD_CHANNEL_OPENING &&
channel->state != SMD_CHANNEL_OPENED) continue;
edge->mbox_client.dev = dev;
edge->mbox_client.knows_txdone = true;
edge->mbox_chan = mbox_request_channel(&edge->mbox_client, 0); if (IS_ERR(edge->mbox_chan)) { if (PTR_ERR(edge->mbox_chan) != -ENOENT) {
ret = dev_err_probe(dev, PTR_ERR(edge->mbox_chan), "failed to acquire IPC mailbox\n"); goto put_node;
}
edge->mbox_chan = NULL;
syscon_np = of_parse_phandle(node, "qcom,ipc", 0); if (!syscon_np) {
dev_err(dev, "no qcom,ipc node\n");
ret = -ENODEV; goto put_node;
}
edge->ipc_regmap = syscon_node_to_regmap(syscon_np);
of_node_put(syscon_np); if (IS_ERR(edge->ipc_regmap)) {
ret = PTR_ERR(edge->ipc_regmap);
dev_err(dev, "failed to get regmap from syscon: %d\n", ret); goto put_node;
}
key = "qcom,ipc";
ret = of_property_read_u32_index(node, key, 1, &edge->ipc_offset); if (ret < 0) {
dev_err(dev, "no offset in %s\n", key); goto put_node;
}
ret = of_property_read_u32_index(node, key, 2, &edge->ipc_bit); if (ret < 0) {
dev_err(dev, "no bit in %s\n", key); goto put_node;
}
}
ret = of_property_read_string(node, "label", &edge->name); if (ret < 0)
edge->name = node->name;
irq = irq_of_parse_and_map(node, 0); if (!irq) {
dev_err(dev, "required smd interrupt missing\n");
ret = -EINVAL; goto put_node;
}
ret = devm_request_irq(dev, irq,
qcom_smd_edge_intr, IRQF_TRIGGER_RISING,
node->name, edge); if (ret) {
dev_err(dev, "failed to request smd irq\n"); goto put_node;
}
/* * Release function for an edge. * Reset the state of each associated channel and free the edge context.
*/ staticvoid qcom_smd_edge_release(struct device *dev)
{ struct qcom_smd_channel *channel, *tmp; struct qcom_smd_edge *edge = to_smd_edge(dev);
/** * qcom_smd_register_edge() - register an edge based on an device_node * @parent: parent device for the edge * @node: device_node describing the edge * * Return: an edge reference, or negative ERR_PTR() on failure.
*/ struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, struct device_node *node)
{ struct qcom_smd_edge *edge; int ret;
if (!qcom_smem_is_available()) return ERR_PTR(-EPROBE_DEFER);
edge = kzalloc(sizeof(*edge), GFP_KERNEL); if (!edge) return ERR_PTR(-ENOMEM);
/** * qcom_smd_unregister_edge() - release an edge and its children * @edge: edge reference acquired from qcom_smd_register_edge
*/ void qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
{ int ret;
/*
 * Shut down all smd clients by making sure that each edge stops processing
 * events and scanning for new channels, then call destroy on the devices.
 */
static void qcom_smd_remove(struct platform_device *pdev)
{
	/*
	 * qcom_smd_remove_edge always returns zero, so there is no need to
	 * check the return value of device_for_each_child.
	 */
	device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge);
}
/*
 * NOTE(review): the following text is website boilerplate that was appended
 * by the page this file was extracted from; it is not part of the driver.
 * Translated from German: "The information on this website has been compiled
 * carefully to the best of our knowledge. However, neither completeness nor
 * correctness nor quality of the provided information is guaranteed.
 * Note: the colour syntax highlighting and the measurement are still
 * experimental."
 */