/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations
 * while the DMAengine framework is used to perform the DMA transfers. Combined
 * this results in a device independent fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals which are
 * connected to a DMA controller which has a DMAengine driver implementation.
 */
/** * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine * @chan: DMA channel. * * This allocates a new IIO buffer which internally uses the DMAengine framework * to perform its transfers. * * Once done using the buffer iio_dmaengine_buffer_free() should be used to * release it.
*/ staticstruct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
{ struct dmaengine_buffer *dmaengine_buffer; unsignedint width, src_width, dest_width; struct dma_slave_caps caps; int ret;
ret = dma_get_slave_caps(chan, &caps); if (ret < 0) return ERR_PTR(ret);
dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL); if (!dmaengine_buffer) return ERR_PTR(-ENOMEM);
/* Needs to be aligned to the maximum of the minimums */ if (caps.src_addr_widths)
src_width = __ffs(caps.src_addr_widths); else
src_width = 1; if (caps.dst_addr_widths)
dest_width = __ffs(caps.dst_addr_widths); else
dest_width = 1;
width = max(src_width, dest_width);
buffer = iio_dmaengine_buffer_alloc(chan); if (IS_ERR(buffer)) return ERR_CAST(buffer);
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
buffer->direction = dir;
ret = iio_device_attach_buffer(indio_dev, buffer); if (ret) {
iio_dmaengine_buffer_free(buffer); return ERR_PTR(ret);
}
return buffer;
}
/** * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device * @dev: DMA channel consumer device * @indio_dev: IIO device to which to attach this buffer. * @channel: DMA channel name, typically "rx". * @dir: Direction of buffer (in or out) * * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc() * and attaches it to an IIO device with iio_device_attach_buffer(). * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the * IIO device. * * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to * release it.
*/ struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, constchar *channel, enum iio_buffer_direction dir)
{ struct dma_chan *chan; struct iio_buffer *buffer;
chan = dma_request_chan(dev, channel); if (IS_ERR(chan)) return ERR_CAST(chan);
buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir); if (IS_ERR(buffer))
dma_release_channel(chan);
/** * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device * @dev: Device for devm ownership and DMA channel consumer device * @indio_dev: IIO device to which to attach this buffer. * @channel: DMA channel name, typically "rx". * @dir: Direction of buffer (in or out) * * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc() * and attaches it to an IIO device with iio_device_attach_buffer(). * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the * IIO device.
*/ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev, struct iio_dev *indio_dev, constchar *channel, enum iio_buffer_direction dir)
{ struct iio_buffer *buffer;
buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir); if (IS_ERR(buffer)) return PTR_ERR(buffer);
/** * devm_iio_dmaengine_buffer_setup_with_handle() - Setup a DMA buffer for an * IIO device * @dev: Device for devm ownership * @indio_dev: IIO device to which to attach this buffer. * @chan: DMA channel * @dir: Direction of buffer (in or out) * * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc() * and attaches it to an IIO device with iio_device_attach_buffer(). * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the * IIO device. * * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the * caller manages requesting and releasing the DMA channel handle.
*/ int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev, struct iio_dev *indio_dev, struct dma_chan *chan, enum iio_buffer_direction dir)
{ struct iio_buffer *buffer;
buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir); if (IS_ERR(buffer)) return PTR_ERR(buffer);
/*
 * NOTE(review): the trailing text below is unrelated German website
 * boilerplate that was appended to this file during extraction and is not
 * part of the driver source; it should be removed. Translation: "The
 * information on this website was carefully compiled to the best of our
 * knowledge. However, neither completeness, correctness, nor quality of the
 * provided information is guaranteed. Remark: the colored syntax display and
 * the measurement are still experimental."
 *
 * Original text:
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */