// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered, it's just setup by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details
 */
/*
 * NOTE(review): truncated/merged extraction. The dev_to_dma_chan() body is cut
 * off after its local declaration, and the clear_bit() lines below belong to a
 * different function (the channel-table initialisation) — its enclosing
 * function signature is not visible here. Recover the full bodies from the
 * original source before attempting to compile.
 */
/** * dev_to_dma_chan - convert a device pointer to its sysfs container object * @dev: device node * * Must be called under dma_list_mutex.
*/ staticstruct dma_chan *dev_to_dma_chan(struct device *dev)
{ struct dma_chan_dev *chan_dev;
/* 'interrupt', 'private', and 'slave' are channel capabilities, * but are not associated with an operation so they do not need * an entry in the channel_table
*/
clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
/** * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU * @chan: DMA channel to test * @cpu: CPU index which the channel should be close to * * Returns true if the channel is in the same NUMA-node as the CPU.
*/ staticbool dma_chan_is_local(struct dma_chan *chan, int cpu)
{ int node = dev_to_node(chan->device->dev); return node == NUMA_NO_NODE ||
cpumask_test_cpu(cpu, cpumask_of_node(node));
}
/** * min_chan - finds the channel with min count and in the same NUMA-node as the CPU * @cap: capability to match * @cpu: CPU index which the channel should be close to * * If some channels are close to the given CPU, the one with the lowest * reference count is returned. Otherwise, CPU is ignored and only the * reference count is taken into account. * * Must be called under dma_list_mutex.
*/ staticstruct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{ struct dma_device *device; struct dma_chan *chan; struct dma_chan *min = NULL; struct dma_chan *localmin = NULL;
list_for_each_entry(device, &dma_device_list, global_node) { if (!dma_has_cap(cap, device->cap_mask) ||
dma_has_cap(DMA_PRIVATE, device->cap_mask)) continue;
list_for_each_entry(chan, &device->channels, device_node) { if (!chan->client_count) continue; if (!min || chan->table_count < min->table_count)
min = chan;
if (dma_chan_is_local(chan, cpu)) if (!localmin ||
chan->table_count < localmin->table_count)
localmin = chan;
}
}
chan = localmin ? localmin : min;
if (chan)
chan->table_count++;
return chan;
}
/*
 * NOTE(review): truncated extraction — only the "undo" half of
 * dma_channel_rebalance() is visible; the redistribution step and the closing
 * brace are missing from this view. Recover from the original source.
 */
/** * dma_channel_rebalance - redistribute the available channels * * Optimize for CPU isolation (each CPU gets a dedicated channel for an * operation type) in the SMP case, and operation isolation (avoid * multi-tasking channels) in the non-SMP case. * * Must be called under dma_list_mutex.
*/ staticvoid dma_channel_rebalance(void)
{ struct dma_chan *chan; struct dma_device *device; int cpu; int cap;
/* undo the last distribution */
for_each_dma_cap_mask(cap, dma_cap_mask_all)
for_each_possible_cpu(cpu)
per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * Must be called under dma_list_mutex.
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	/* take one module reference for each missing client reference */
	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}
/*
 * NOTE(review): truncated extraction — the success return and the
 * err_out/module_put_out unwind labels referenced by the gotos below are
 * missing from this view, as is the closing brace.
 */
/** * dma_chan_get - try to grab a DMA channel's parent driver module * @chan: channel to grab * * Must be called under dma_list_mutex.
*/ staticint dma_chan_get(struct dma_chan *chan)
{ struct module *owner = dma_chan_to_owner(chan); int ret;
/* The channel is already in use, update client count */ if (chan->client_count) {
__module_get(owner);
chan->client_count++; return 0;
}
/* first reference: pin the owning module before touching the device */
if (!try_module_get(owner)) return -ENODEV;
ret = kref_get_unless_zero(&chan->device->ref); if (!ret) {
ret = -ENODEV; goto module_put_out;
}
/* allocate upon first client reference */ if (chan->device->device_alloc_chan_resources) {
ret = chan->device->device_alloc_chan_resources(chan); if (ret < 0) goto err_out;
}
chan->client_count++;
if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
balance_ref_count(chan);
/*
 * NOTE(review): truncated extraction — the tail of dma_chan_put() (dropping
 * the device and module references taken by dma_chan_get()) and the closing
 * brace are missing from this view.
 */
/** * dma_chan_put - drop a reference to a DMA channel's parent driver module * @chan: channel to release * * Must be called under dma_list_mutex.
*/ staticvoid dma_chan_put(struct dma_chan *chan)
{ /* This channel is not in use, bail out */ if (!chan->client_count) return;
chan->client_count--;
/* This channel is not in use anymore, free it */ if (!chan->client_count && chan->device->device_free_chan_resources) { /* Make sure all operations have completed */
dmaengine_synchronize(chan);
chan->device->device_free_chan_resources(chan);
}
/* If the channel is used via a DMA request router, free the mapping */ if (chan->router && chan->router->route_free) {
chan->router->route_free(chan->router->dev, chan->route_data);
chan->router = NULL;
chan->route_data = NULL;
}
/*
 * NOTE(review): orphaned fragment — this looks like the interior of a
 * slave-capability query function (presumably dma_get_slave_caps(); its
 * signature and the locals 'device', 'chan' and 'caps' are not visible here).
 * TODO: confirm against the original source.
 */
/* check if the channel supports slave transactions */ if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
test_bit(DMA_CYCLIC, device->cap_mask.bits))) return -ENXIO;
/* * Check whether it reports it uses the generic slave * capabilities, if not, that means it doesn't support any * kind of slave capabilities reporting.
*/ if (!device->directions) return -ENXIO;
/* * DMA engine device might be configured with non-uniformly * distributed slave capabilities per device channels. In this * case the corresponding driver may provide the device_caps * callback to override the generic capabilities with * channel-specific ones.
*/ if (device->device_caps)
device->device_caps(chan, caps);
/*
 * NOTE(review): orphaned fragment — interior of a private-channel candidate
 * search (signature not visible; the locals 'dev', 'mask', 'chan', 'device'
 * and 'err' are declared elsewhere). TODO: recover the enclosing function
 * from the original source.
 */
if (mask && !dma_device_satisfies_mask(dev, mask)) {
dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); return NULL;
} /* devices with multiple channels need special handling as we need to * ensure that all channels are either private or public.
*/ if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
list_for_each_entry(chan, &dev->channels, device_node) { /* some channels are already publicly allocated */ if (chan->client_count) return NULL;
}
if (chan) { /* Found a suitable channel, try to grab, prep, and return it. * We first set DMA_PRIVATE to disable balance_ref_count as this * channel will not be published in the general-purpose * allocator
*/
dma_cap_set(DMA_PRIVATE, device->cap_mask);
device->privatecnt++;
err = dma_chan_get(chan);
/* on failure undo the private reservation and report the error */
if (err) { if (err == -ENODEV) {
dev_dbg(device->dev, "%s: %s module removed\n",
__func__, dma_chan_name(chan));
list_del_rcu(&device->global_node);
} else
dev_dbg(device->dev, "%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
if (--device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, device->cap_mask);
chan = ERR_PTR(err);
}
}
return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
/*
 * NOTE(review): merged extraction — dma_get_slave_channel() below is missing
 * its mutex_unlock/return tail, and the IS_ERR check plus the 'found:' label
 * and debugfs naming that follow belong to a different function
 * (dma_request_chan(); note its locals 'dev' and 'name' are not in scope
 * here). Recover both functions from the original source.
 */
/** * dma_get_slave_channel - try to get specific channel exclusively * @chan: target channel
*/ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{ /* lock against __dma_request_channel */
mutex_lock(&dma_list_mutex);
if (chan->client_count == 0) { struct dma_device *device = chan->device; int err;
dma_cap_set(DMA_PRIVATE, device->cap_mask);
device->privatecnt++;
err = dma_chan_get(chan); if (err) {
dev_dbg(chan->device->dev, "%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
chan = NULL; if (--device->privatecnt == 0)
dma_cap_clear(DMA_PRIVATE, device->cap_mask);
}
} else
chan = NULL;
if (IS_ERR(chan)) return chan; if (!chan) return ERR_PTR(-EPROBE_DEFER);
found: #ifdef CONFIG_DEBUG_FS
chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), name); /* No functional issue if it fails, users are supposed to test before use */ #endif
/*
 * NOTE(review): truncated extraction — only the declaration of
 * dma_request_chan_by_mask() is visible; its body is missing from this view.
 */
/** * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities * @mask: capabilities that the channel must satisfy * * Returns pointer to appropriate DMA channel on success or an error pointer.
*/ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{ struct dma_chan *chan;
/*
 * NOTE(review): truncated extraction — the return tail of
 * devm_dma_request_chan() (turning a non-zero 'ret' into an ERR_PTR and
 * returning 'chan') is missing from this view; 'constchar' is a fused token.
 */
/** * devm_dma_request_chan - try to allocate an exclusive slave channel * @dev: pointer to client device structure * @name: slave channel name * * Returns pointer to appropriate DMA channel on success or an error pointer. * * The operation is managed and will be undone on driver detach.
*/
struct dma_chan *devm_dma_request_chan(struct device *dev, constchar *name)
{ struct dma_chan *chan = dma_request_chan(dev, name); int ret = 0;
if (!IS_ERR(chan))
ret = devm_add_action_or_reset(dev, dmaenginem_release_channel, chan);
/*
 * NOTE(review): orphaned fragment — this is the body of dmaengine_get()
 * (its signature and the increment of dmaengine_ref_count are not visible
 * here; 'device', '_d', 'chan' and 'err' are declared elsewhere). Note the
 * fused 'elseif' token below.
 */
/* try to grab channels */
list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) continue;
list_for_each_entry(chan, &device->channels, device_node) {
err = dma_chan_get(chan); if (err == -ENODEV) { /* module removed before we could use it */
list_del_rcu(&device->global_node); break;
} elseif (err)
dev_dbg(chan->device->dev, "%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
}
}
/* if this is the first reference and there were channels * waiting we need to rebalance to get those channels * incorporated into the channel table
*/ if (dmaengine_ref_count == 1)
dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
/*
 * NOTE(review): truncated extraction — only the declarations of
 * dmaengine_put() are visible; its body is missing from this view.
 */
/** * dmaengine_put - let DMA drivers be removed when ref_count == 0
*/ void dmaengine_put(void)
{ struct dma_device *device, *_d; struct dma_chan *chan;
staticbool device_has_all_tx_types(struct dma_device *device)
{ /* A device that satisfies this test has channels that will never cause * an async_tx channel switch event as all possible operation types can * be handled.
*/ #ifdef CONFIG_ASYNC_TX_DMA if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) returnfalse; #endif
#if IS_ENABLED(CONFIG_ASYNC_MEMCPY) if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) returnfalse; #endif
#if IS_ENABLED(CONFIG_ASYNC_XOR) if (!dma_has_cap(DMA_XOR, device->cap_mask)) returnfalse;
#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) returnfalse; #endif #endif
#if IS_ENABLED(CONFIG_ASYNC_PQ) if (!dma_has_cap(DMA_PQ, device->cap_mask)) returnfalse;
#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) returnfalse; #endif #endif
returntrue;
}
/*
 * NOTE(review): merged extraction — get_dma_id() is cut after the ida_alloc()
 * call, and the allocation code that follows belongs to a different function
 * (channel registration; its parameter 'chan' and error labels
 * err_free_local/err_free_dev are not visible here).
 */
staticint get_dma_id(struct dma_device *device)
{ int rc = ida_alloc(&dma_ida, GFP_KERNEL);
chan->local = alloc_percpu(typeof(*chan->local)); if (!chan->local) return -ENOMEM;
chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); if (!chan->dev) {
rc = -ENOMEM; goto err_free_local;
}
/* * When the chan_id is a negative value, we are dynamically adding * the channel. Otherwise we are static enumerating.
*/
chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); if (chan->chan_id < 0) {
pr_err("%s: unable to alloc ida for chan: %d\n",
__func__, chan->chan_id);
rc = chan->chan_id; goto err_free_dev;
}
/*
 * NOTE(review): truncated extraction of dma_async_device_register() — the
 * CHECK_CAP() invocations that normally follow the macro definition, and the
 * remainder of the err_out unwind path (channel teardown) plus the closing
 * brace, are missing from this view.
 */
/** * dma_async_device_register - registers DMA devices found * @device: pointer to &struct dma_device * * After calling this routine the structure should not be freed except in the * device_release() callback which will be called after * dma_async_device_unregister() is called and no further references are taken.
*/ int dma_async_device_register(struct dma_device *device)
{ int rc; struct dma_chan* chan;
if (!device) return -ENODEV;
/* validate device routines */ if (!device->dev) {
pr_err("DMAdevice must have dev\n"); return -EIO;
}
device->owner = device->dev->driver->owner;
/* helper: reject devices that claim a capability without its prep op */
#define CHECK_CAP(_name, _type) \
{ \ if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \
dev_err(device->dev, \ "Device claims capability %s, but op is not defined\n", \
__stringify(_type)); \ return -EIO; \
} \
}
if (!device->device_tx_status) {
dev_err(device->dev, "Device tx_status is not defined\n"); return -EIO;
}
if (!device->device_issue_pending) {
dev_err(device->dev, "Device issue_pending is not defined\n"); return -EIO;
}
if (!device->device_release)
dev_dbg(device->dev, "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
kref_init(&device->ref);
/* note: this only matters in the * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
*/ if (device_has_all_tx_types(device))
dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
rc = get_dma_id(device); if (rc != 0) return rc;
ida_init(&device->chan_ida);
/* represent channels in sysfs. Probably want devs too */
list_for_each_entry(chan, &device->channels, device_node) {
rc = __dma_async_device_channel_register(device, chan, NULL); if (rc < 0) goto err_out;
}
mutex_lock(&dma_list_mutex); /* take references on public channels */ if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
list_for_each_entry(chan, &device->channels, device_node) { /* if clients are already waiting for channels we need * to take references on their behalf
*/ if (dma_chan_get(chan) == -ENODEV) { /* note we can only get here for the first * channel as the remaining channels are * guaranteed to get a reference
*/
rc = -ENODEV;
mutex_unlock(&dma_list_mutex); goto err_out;
}
}
list_add_tail_rcu(&device->global_node, &dma_device_list); if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
device->privatecnt++; /* Always private */
dma_channel_rebalance();
mutex_unlock(&dma_list_mutex);
dmaengine_debug_register(device);
return 0;
err_out: /* if we never registered a channel just release the idr */ if (!device->chancnt) {
ida_free(&dma_ida, device->dev_id); return rc;
}
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	dmaengine_debug_unregister(device);

	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	ida_free(&dma_ida, device->dev_id);
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);
/*
 * NOTE(review): truncated extraction — the devm cleanup registration and
 * return tail of dmaenginem_async_device_register() are missing from this
 * view.
 */
/** * dmaenginem_async_device_register - registers DMA devices found * @device: pointer to &struct dma_device * * The operation is managed and will be undone on driver detach.
*/ int dmaenginem_async_device_register(struct dma_device *device)
{ int ret;
ret = dma_async_device_register(device); if (ret) return ret;
/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	/* give the descriptor up to 5 seconds to get submitted */
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	/* busy-wait while the cookie still reads -EBUSY (not yet submitted) */
	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/*
 * NOTE(review): truncated extraction — the tail of dma_run_dependencies()
 * (submitting each dependent descriptor and issuing pending work on the
 * channel) and the closing brace are missing from this view.
 */
/** * dma_run_dependencies - process dependent operations on the target channel * @tx: transaction with dependencies * * Helper routine for DMA drivers to process (start) dependent operations * on their target channel.
*/ void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{ struct dma_async_tx_descriptor *dep = txd_next(tx); struct dma_async_tx_descriptor *dep_next; struct dma_chan *chan;
if (!dep) return;
/* we'll submit tx->next now, so clear the link */
txd_clear_next(tx);
chan = dep->chan;
/* keep submitting up until a channel switch is detected * in that case we will be called again as a result of * processing the interrupt from async_tx_channel_switch
*/ for (; dep; dep = dep_next) {
txd_lock(dep);
txd_clear_parent(dep);
/* peek at the next dependency while holding the lock */
dep_next = txd_next(dep); if (dep_next && dep_next->chan == chan)
txd_clear_next(dep); /* ->next will be submitted */ else
dep_next = NULL; /* submit current dep and terminate */
txd_unlock(dep);
/*
 * NOTE(review): trailing website boilerplate (German, unrelated to this file),
 * wrapped in a comment so it cannot be parsed as code. Translation: "The
 * information on this web page was carefully compiled to the best of our
 * knowledge. However, neither completeness, correctness, nor quality of the
 * provided information is guaranteed. Note: the colored syntax display and
 * the measurement are still experimental."
 */