// SPDX-License-Identifier: GPL-2.0
/*
 * core.c - Implementation of core module of MOST Linux driver stack
 *
 * Copyright (C) 2013-2020 Microchip Technology Germany II GmbH & Co. KG
 */
/**
 * list_pop_mbo - retrieves the first MBO of the list and removes it
 * @ptr: the list head to grab the MBO from.
 *
 * Statement-expression macro: evaluates to the MBO that was unlinked.
 * The list must be non-empty when this is invoked.
 */
#define list_pop_mbo(ptr)						\
({									\
	struct mbo *_first_mbo =					\
		list_first_entry(ptr, struct mbo, list);		\
	list_del(&_first_mbo->list);					\
	_first_mbo;							\
})
/** * most_free_mbo_coherent - free an MBO and its coherent buffer * @mbo: most buffer
*/ staticvoid most_free_mbo_coherent(struct mbo *mbo)
{ struct most_channel *c = mbo->context;
u16 const coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
if (c->iface->dma_free)
c->iface->dma_free(mbo, coherent_buf_size); else
kfree(mbo->virt_address);
kfree(mbo); if (atomic_sub_and_test(1, &c->mbo_ref))
complete(&c->cleanup);
}
if (!c) return -ENODEV;
c->cfg.num_buffers = val; return 0;
}
int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf)
{ int i; struct most_channel *c = get_channel(mdev, mdev_ch);
if (!c) return -ENODEV; for (i = 0; i < ARRAY_SIZE(ch_data_type); i++) { if (!strcmp(buf, ch_data_type[i].name)) {
c->cfg.data_type = ch_data_type[i].most_ch_data_type; break;
}
}
if (i == ARRAY_SIZE(ch_data_type))
dev_warn(&c->dev, "Invalid attribute settings\n"); return 0;
}
/** * arm_mbo - recycle MBO for further usage * @mbo: most buffer * * This puts an MBO back to the list to have it ready for up coming * tx transactions. * * In case the MBO belongs to a channel that recently has been * poisoned, the MBO is scheduled to be trashed. * Calls the completion handler of an attached component.
*/ staticvoid arm_mbo(struct mbo *mbo)
{ unsignedlong flags; struct most_channel *c;
if (c->pipe0.refs && c->pipe0.comp->tx_completion)
c->pipe0.comp->tx_completion(c->iface, c->channel_id);
if (c->pipe1.refs && c->pipe1.comp->tx_completion)
c->pipe1.comp->tx_completion(c->iface, c->channel_id);
}
/** * arm_mbo_chain - helper function that arms an MBO chain for the HDM * @c: pointer to interface channel * @dir: direction of the channel * @compl: pointer to completion function * * This allocates buffer objects including the containing DMA coherent * buffer and puts them in the fifo. * Buffers of Rx channels are put in the kthread fifo, hence immediately * submitted to the HDM. * * Returns the number of allocated and enqueued MBOs.
*/ staticint arm_mbo_chain(struct most_channel *c, int dir, void (*compl)(struct mbo *))
{ unsignedint i; struct mbo *mbo; unsignedlong flags;
u32 coherent_buf_size = c->cfg.buffer_size + c->cfg.extra_len;
atomic_set(&c->mbo_nq_level, 0);
for (i = 0; i < c->cfg.num_buffers; i++) {
mbo = kzalloc(sizeof(*mbo), GFP_KERNEL); if (!mbo) goto flush_fifos;
/**
 * most_write_completion - write completion handler
 * @mbo: most buffer
 *
 * This recycles the MBO for further usage. In case the channel has been
 * poisoned, the MBO is scheduled to be trashed.
 */
static void most_write_completion(struct mbo *mbo)
{
	struct most_channel *c = mbo->context;

	if (likely(!c->is_poisoned && mbo->status != MBO_E_CLOSE))
		arm_mbo(mbo);
	else
		trash_mbo(mbo);
}
int channel_has_mbo(struct most_interface *iface, int id, struct most_component *comp)
{ struct most_channel *c = iface->p->channel[id]; unsignedlong flags; int empty;
/** * most_get_mbo - get pointer to an MBO of pool * @iface: pointer to interface instance * @id: channel ID * @comp: driver component * * This attempts to get a free buffer out of the channel fifo. * Returns a pointer to MBO on success or NULL otherwise.
*/ struct mbo *most_get_mbo(struct most_interface *iface, int id, struct most_component *comp)
{ struct mbo *mbo; struct most_channel *c; unsignedlong flags; int *num_buffers_ptr;
c = iface->p->channel[id]; if (unlikely(!c)) return NULL;
/**
 * most_read_completion - read completion handler
 * @mbo: most buffer
 *
 * Called by the HDM when data has been received from the hardware and
 * copied into the MBO's buffer.
 *
 * A poisoned channel (or an MBO_E_CLOSE status) sends the buffer to the
 * trash queue.  MBO_E_INVAL buffers are immediately re-enqueued to the
 * HDM.  Otherwise the buffer is offered to the attached components; if
 * neither consumes it, it is returned to the pool.
 */
static void most_read_completion(struct mbo *mbo)
{
	struct most_channel *ch = mbo->context;

	if (unlikely(ch->is_poisoned || mbo->status == MBO_E_CLOSE)) {
		trash_mbo(mbo);
		return;
	}

	if (mbo->status == MBO_E_INVAL) {
		nq_hdm_mbo(mbo);
		atomic_inc(&ch->mbo_nq_level);
		return;
	}

	/* Queue level hit zero: flag the channel as starving. */
	if (atomic_sub_and_test(1, &ch->mbo_nq_level))
		ch->is_starving = 1;

	/* A component returning 0 has taken ownership of the MBO. */
	if (ch->pipe0.refs && ch->pipe0.comp->rx_completion &&
	    !ch->pipe0.comp->rx_completion(mbo))
		return;

	if (ch->pipe1.refs && ch->pipe1.comp->rx_completion &&
	    !ch->pipe1.comp->rx_completion(mbo))
		return;

	most_put_mbo(mbo);
}
/** * most_start_channel - prepares a channel for communication * @iface: pointer to interface instance * @id: channel ID * @comp: driver component * * This prepares the channel for usage. Cross-checks whether the * channel's been properly configured. * * Returns 0 on success or error code otherwise.
*/ int most_start_channel(struct most_interface *iface, int id, struct most_component *comp)
{ int num_buffer; int ret; struct most_channel *c = iface->p->channel[id];
if (unlikely(!c)) return -EINVAL;
mutex_lock(&c->start_mutex); if (c->pipe0.refs + c->pipe1.refs > 0) goto out; /* already started by another component */
if (!try_module_get(iface->mod)) {
dev_err(&c->dev, "Failed to acquire HDM lock\n");
mutex_unlock(&c->start_mutex); return -ENOLCK;
}
c->cfg.extra_len = 0; if (c->iface->configure(c->iface, c->channel_id, &c->cfg)) {
dev_err(&c->dev, "Channel configuration failed. Go check settings...\n");
ret = -EINVAL; goto err_put_module;
}
init_waitqueue_head(&c->hdm_fifo_wq);
if (c->cfg.direction == MOST_CH_RX)
num_buffer = arm_mbo_chain(c, c->cfg.direction,
most_read_completion); else
num_buffer = arm_mbo_chain(c, c->cfg.direction,
most_write_completion); if (unlikely(!num_buffer)) {
ret = -ENOMEM; goto err_put_module;
}
ret = run_enqueue_thread(c, id); if (ret) goto err_put_module;
/**
 * most_stop_channel - stops a running channel
 * @iface: pointer to interface instance
 * @id: channel ID
 * @comp: driver component
 *
 * Drops @comp's reference on the channel.  The channel is only actually
 * torn down (enqueue thread stopped, channel poisoned in the HDM, fifos
 * flushed) when no other component still holds a reference.
 *
 * Returns 0 on success or a negative error code otherwise.
 */
int most_stop_channel(struct most_interface *iface, int id,
		      struct most_component *comp)
{
	struct most_channel *c;

	if (unlikely((!iface) || (id >= iface->num_channels) || (id < 0))) {
		pr_err("Bad interface or index out of range\n");
		return -EINVAL;
	}
	c = iface->p->channel[id];
	if (unlikely(!c))
		return -EINVAL;

	mutex_lock(&c->start_mutex);
	/* Another component still uses the channel: only drop our ref. */
	if (c->pipe0.refs + c->pipe1.refs >= 2)
		goto out;

	if (c->hdm_enqueue_task)
		kthread_stop(c->hdm_enqueue_task);
	c->hdm_enqueue_task = NULL;

	if (iface->mod)
		module_put(iface->mod);

	/*
	 * Mark the channel poisoned before telling the HDM so that any
	 * completion arriving from now on trashes its MBO (see
	 * most_write_completion()/most_read_completion()).
	 */
	c->is_poisoned = true;
	if (c->iface->poison_channel(c->iface, c->channel_id)) {
		dev_err(&c->dev, "Failed to stop channel %d of interface %s\n", c->channel_id,
			c->iface->description);
		mutex_unlock(&c->start_mutex);
		return -EAGAIN;
	}
	flush_trash_fifo(c);
	flush_channel_fifos(c);

#ifdef CMPL_INTERRUPTIBLE
	if (wait_for_completion_interruptible(&c->cleanup)) {
		dev_err(&c->dev, "Interrupted while cleaning up channel %d\n", c->channel_id);
		mutex_unlock(&c->start_mutex);
		return -EINTR;
	}
#else
	/* Block until the last outstanding MBO has been freed. */
	wait_for_completion(&c->cleanup);
#endif
	c->is_poisoned = false;

out:
	if (comp == c->pipe0.comp)
		c->pipe0.refs--;
	if (comp == c->pipe1.comp)
		c->pipe1.refs--;
	mutex_unlock(&c->start_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(most_stop_channel);
/**
 * most_register_component - registers a driver component with the core
 * @comp: driver component
 *
 * Appends @comp to the core's global component list.
 *
 * Returns 0 on success or -EINVAL when @comp is NULL.
 */
int most_register_component(struct most_component *comp)
{
	if (!comp) {
		pr_err("Bad component\n");
		return -EINVAL;
	}
	list_add_tail(&comp->list, &comp_list);
	return 0;
}
EXPORT_SYMBOL_GPL(most_register_component);
/** * most_register_interface - registers an interface with core * @iface: device interface * * Allocates and initializes a new interface instance and all of its channels. * Returns a pointer to kobject or an error pointer.
*/ int most_register_interface(struct most_interface *iface)
{ unsignedint i; int id; struct most_channel *c;
err_free_resources: while (i > 0) {
c = iface->p->channel[--i];
device_unregister(&c->dev);
}
kfree(iface->p);
device_unregister(iface->dev);
ida_free(&mdev_id, id); return -ENOMEM;
}
EXPORT_SYMBOL_GPL(most_register_interface);
/** * most_deregister_interface - deregisters an interface with core * @iface: device interface * * Before removing an interface instance from the list, all running * channels are stopped and poisoned.
*/ void most_deregister_interface(struct most_interface *iface)
{ int i; struct most_channel *c;
for (i = 0; i < iface->num_channels; i++) {
c = iface->p->channel[i]; if (c->pipe0.comp)
c->pipe0.comp->disconnect_channel(c->iface,
c->channel_id); if (c->pipe1.comp)
c->pipe1.comp->disconnect_channel(c->iface,
c->channel_id);
c->pipe0.comp = NULL;
c->pipe1.comp = NULL;
list_del(&c->list);
device_unregister(&c->dev);
}
/** * most_stop_enqueue - prevents core from enqueueing MBOs * @iface: pointer to interface * @id: channel id * * This is called by an HDM that _cannot_ attend to its duties and * is imminent to get run over by the core. The core is not going to * enqueue any further packets unless the flagging HDM calls * most_resume enqueue().
*/ void most_stop_enqueue(struct most_interface *iface, int id)
{ struct most_channel *c = iface->p->channel[id];
/** * most_resume_enqueue - allow core to enqueue MBOs again * @iface: pointer to interface * @id: channel id * * This clears the enqueue halt flag and enqueues all MBOs currently * sitting in the wait fifo.
*/ void most_resume_enqueue(struct most_interface *iface, int id)
{ struct most_channel *c = iface->p->channel[id];
subsys_initcall(most_init);
module_exit(most_exit);
MODULE_LICENSE("GPL");
/* Author e-mail restored; it was stripped to "Christian Gromm " upstream
 * uses <christian.gromm@microchip.com>. */
MODULE_AUTHOR("Christian Gromm <christian.gromm@microchip.com>");
MODULE_DESCRIPTION("Core module of stacked MOST Linux driver");
/*
 * Extraction-tool footer (not part of the driver source), translated from
 * German and wrapped as a comment so the file remains valid C:
 *
 * Measurement V0.5 — processing duration: 0.16 seconds (pre-processed).
 * The information on this website was compiled carefully to the best of
 * our knowledge.  However, neither completeness, correctness, nor quality
 * of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental.
 */