staticbool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
size_t to_wait, int to_flush)
{
size_t avail; int flushed = 0;
/* wakeup if the device was unregistered */ if (!indio_dev->info) returntrue;
/* drain the buffer if it was disabled */ if (!iio_buffer_is_active(buf)) {
to_wait = min_t(size_t, to_wait, 1);
to_flush = 0;
}
avail = iio_buffer_data_available(buf);
if (avail >= to_wait) { /* force a flush for non-blocking reads */ if (!to_wait && avail < to_flush)
iio_buffer_flush_hwfifo(indio_dev, buf,
to_flush - avail); returntrue;
}
if (to_flush)
flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
to_wait - avail); if (flushed <= 0) returnfalse;
if (avail + flushed >= to_wait) returntrue;
returnfalse;
}
/**
 * iio_buffer_read() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
			       size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	if (rb->direction != IIO_BUFFER_DIRECTION_IN)
		return -EPERM;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			/*
			 * NOTE(review): the sleep below and the closing brace
			 * of this "not ready" branch were missing in the
			 * source (the braces did not balance and blocking
			 * readers would have busy-spun). Restored from the
			 * upstream implementation — confirm against the tree.
			 */
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
static size_t iio_buffer_space_available(struct iio_buffer *buf)
{ if (buf->access->space_available) return buf->access->space_available(buf);
ret = rb->access->write(rb, n - written, buf + written); if (ret < 0) break;
written += ret;
} while (written != n);
remove_wait_queue(&rb->pollq, &wait);
return ret < 0 ? ret : written;
}
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
static __poll_t iio_buffer_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;

	if (!indio_dev->info || !rb)
		return 0;

	poll_wait(filp, &rb->pollq, wait);

	switch (rb->direction) {
	case IIO_BUFFER_DIRECTION_IN:
		if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
			return EPOLLIN | EPOLLRDNORM;
		break;
	case IIO_BUFFER_DIRECTION_OUT:
		if (iio_buffer_space_available(rb))
			return EPOLLOUT | EPOLLWRNORM;
		break;
	}

	/*
	 * NOTE(review): the source was missing this final return and instead
	 * fell into an orphaned fragment of a different function that called
	 * iio_buffer_poll() recursively — an unconditional infinite recursion.
	 * Restored the terminating "return 0;" per the function's own
	 * kernel-doc contract.
	 */
	return 0;
}
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	/* Wake every attached buffer's poll queue, not just the legacy one. */
	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		wake_up(&buffer->pollq);
	}
}
/*
 * iio_pop_from_buffer() - remove a single datum from the buffer
 * @buffer:	the buffer to pop from
 * @data:	destination for the popped datum (caller-provided storage)
 *
 * Return: 0 on success, -EINVAL if the buffer does not support removal,
 * or the error code from the buffer's remove_from() callback.
 */
int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
{
	if (!buffer || !buffer->access || !buffer->access->remove_from)
		return -EINVAL;

	/*
	 * NOTE(review): the source span was truncated here and fused with the
	 * tail of an unrelated sysfs show handler; the delegation below is
	 * restored from the upstream implementation — confirm against the tree.
	 */
	return buffer->access->remove_from(buffer, data);
}
/* Note NULL used as error indicator as it doesn't make sense. */ staticconstunsignedlong *iio_scan_mask_match(constunsignedlong *av_masks, unsignedint masklength, constunsignedlong *mask, bool strict)
{ if (bitmap_empty(mask, masklength)) return NULL; /* * The condition here do not handle multi-long masks correctly. * It only checks the first long to be zero, and will use such mask * as a terminator even if there was bits set after the first long. * * Correct check would require using: * while (!bitmap_empty(av_masks, masklength)) * instead. This is potentially hazardous because the * avaliable_scan_masks is a zero terminated array of longs - and * using the proper bitmap_empty() check for multi-long wide masks * would require the array to be terminated with multiple zero longs - * which is not such an usual pattern. * * As writing of this no multi-long wide masks were found in-tree, so * the simple while (*av_masks) check is working.
*/ while (*av_masks) { if (strict) { if (bitmap_equal(mask, av_masks, masklength)) return av_masks;
} else { if (bitmap_subset(mask, av_masks, masklength)) return av_masks;
}
av_masks += BITS_TO_LONGS(masklength);
} return NULL;
}
staticbool iio_validate_scan_mask(struct iio_dev *indio_dev, constunsignedlong *mask)
{ if (!indio_dev->setup_ops->validate_scan_mask) returntrue;
/** * iio_scan_mask_set() - set particular bit in the scan mask * @indio_dev: the iio device * @buffer: the buffer whose scan mask we are interested in * @bit: the bit to be set. * * Note that at this point we have no way of knowing what other * buffers might request, hence this code only verifies that the * individual buffers request is plausible.
*/ staticint iio_scan_mask_set(struct iio_dev *indio_dev, struct iio_buffer *buffer, int bit)
{ unsignedint masklength = iio_get_masklength(indio_dev); constunsignedlong *mask; unsignedlong *trialmask;
if (!masklength) {
WARN(1, "Trying to set scanmask prior to registering buffer\n"); return -EINVAL;
}
ret = kstrtobool(buf, &state); if (ret < 0) return ret;
guard(mutex)(&iio_dev_opaque->mlock); if (iio_buffer_is_active(buffer)) return -EBUSY;
ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); if (ret < 0) return ret;
if (state && ret) return len;
if (state)
ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); else
ret = iio_scan_mask_clear(buffer, this_attr->address); if (ret) return ret;
staticint iio_compute_scan_bytes(struct iio_dev *indio_dev, constunsignedlong *mask, bool timestamp)
{ unsignedint bytes = 0; int length, i, largest = 0;
/* How much space will the demuxed element take? */
for_each_set_bit(i, mask, iio_get_masklength(indio_dev)) {
length = iio_storage_bytes_for_si(indio_dev, i); if (length < 0) return length;
/*
 * iio_buffer_request_update() - refresh buffer parameters before enabling
 * @indio_dev:	the IIO device
 * @buffer:	the buffer to update
 *
 * Recomputes bytes_per_datum and, if the buffer implementation provides a
 * request_update() callback, gives it a chance to (re)allocate its storage.
 *
 * Return: 0 on success or the error code from request_update().
 */
static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}
staticvoid iio_free_scan_mask(struct iio_dev *indio_dev, constunsignedlong *mask)
{ /* If the mask is dynamically allocated free it, otherwise do nothing */ if (!indio_dev->available_scan_masks)
bitmap_free(mask);
}
if (insert_buffer &&
bitmap_empty(insert_buffer->scan_mask, masklength)) {
dev_dbg(&indio_dev->dev, "At least one scan element must be enabled first\n"); return -EINVAL;
}
/* * If there is just one buffer and we are removing it there is nothing * to verify.
*/ if (remove_buffer && !insert_buffer &&
list_is_singular(&iio_dev_opaque->buffer_list)) return 0;
/* Definitely possible for devices to support both of these. */ if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
config->mode = INDIO_BUFFER_TRIGGERED;
} elseif (modes & INDIO_BUFFER_HARDWARE) { /* * Keep things simple for now and only allow a single buffer to * be connected in hardware mode.
*/ if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list)) return -EINVAL;
config->mode = INDIO_BUFFER_HARDWARE;
strict_scanmask = true;
} elseif (modes & INDIO_BUFFER_SOFTWARE) {
config->mode = INDIO_BUFFER_SOFTWARE;
} else { /* Can only occur on first buffer */ if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n"); return -EINVAL;
}
/* What scan mask do we actually have? */
compound_mask = bitmap_zalloc(masklength, GFP_KERNEL); if (!compound_mask) return -ENOMEM;
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from: index to copy from
 * @to: index to copy to
 * @length: how many bytes to copy
 * @l: list head used for management
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};
/* Clear out any old demux */
iio_buffer_demux_free(buffer);
kfree(buffer->demux_bounce);
buffer->demux_bounce = NULL;
/* First work out which scan mode we will actually have */ if (bitmap_equal(indio_dev->active_scan_mask,
buffer->scan_mask, masklength)) return 0;
/* Now we have the two masks, work from least sig and build up sizes */
for_each_set_bit(out_ind, buffer->scan_mask, masklength) {
in_ind = find_next_bit(indio_dev->active_scan_mask,
masklength, in_ind + 1); while (in_ind != out_ind) {
ret = iio_storage_bytes_for_si(indio_dev, in_ind); if (ret < 0) goto error_clear_mux_table;
length = ret; /* Make sure we are aligned */
in_loc = roundup(in_loc, length) + length;
in_ind = find_next_bit(indio_dev->active_scan_mask,
masklength, in_ind + 1);
}
ret = iio_storage_bytes_for_si(indio_dev, in_ind); if (ret < 0) goto error_clear_mux_table;
length = ret;
out_loc = roundup(out_loc, length);
in_loc = roundup(in_loc, length);
ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length); if (ret) goto error_clear_mux_table;
out_loc += length;
in_loc += length;
} /* Relies on scan_timestamp being last */ if (buffer->scan_timestamp) {
ret = iio_storage_bytes_for_timestamp(indio_dev); if (ret < 0) goto error_clear_mux_table;
/* Wind up again */ if (indio_dev->setup_ops->preenable) {
ret = indio_dev->setup_ops->preenable(indio_dev); if (ret) {
dev_dbg(&indio_dev->dev, "Buffer not started: buffer preenable failed (%d)\n", ret); goto err_undo_config;
}
}
if (indio_dev->info->update_scan_mode) {
ret = indio_dev->info
->update_scan_mode(indio_dev,
indio_dev->active_scan_mask); if (ret < 0) {
dev_dbg(&indio_dev->dev, "Buffer not started: update scan mode failed (%d)\n",
ret); goto err_run_postdisable;
}
}
if (indio_dev->info->hwfifo_set_watermark)
indio_dev->info->hwfifo_set_watermark(indio_dev,
config->watermark);
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
ret = iio_buffer_enable(buffer, indio_dev); if (ret) {
tmp = buffer; goto err_disable_buffers;
}
}
if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
ret = iio_trigger_attach_poll_func(indio_dev->trig,
indio_dev->pollfunc); if (ret) goto err_disable_buffers;
}
if (indio_dev->setup_ops->postenable) {
ret = indio_dev->setup_ops->postenable(indio_dev); if (ret) {
dev_dbg(&indio_dev->dev, "Buffer not started: postenable failed (%d)\n", ret); goto err_detach_pollfunc;
}
}
staticint iio_disable_buffers(struct iio_dev *indio_dev)
{ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer *buffer; int ret = 0; int ret2;
/* Wind down existing buffers - iff there are any */ if (list_empty(&iio_dev_opaque->buffer_list)) return 0;
/* * If things go wrong at some step in disable we still need to continue * to perform the other steps, otherwise we leave the device in a * inconsistent state. We return the error code for the first error we * encountered.
*/
if (indio_dev->setup_ops->predisable) {
ret2 = indio_dev->setup_ops->predisable(indio_dev); if (ret2 && !ret)
ret = ret2;
}
if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
iio_trigger_detach_poll_func(indio_dev->trig,
indio_dev->pollfunc);
}
list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
ret2 = iio_buffer_disable(buffer, indio_dev); if (ret2 && !ret)
ret = ret2;
}
if (indio_dev->setup_ops->postdisable) {
ret2 = indio_dev->setup_ops->postdisable(indio_dev); if (ret2 && !ret)
ret = ret2;
}
ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
&new_config); if (ret) return ret;
if (insert_buffer) {
ret = iio_buffer_request_update(indio_dev, insert_buffer); if (ret) goto err_free_config;
}
ret = iio_disable_buffers(indio_dev); if (ret) goto err_deactivate_all;
if (remove_buffer)
iio_buffer_deactivate(remove_buffer); if (insert_buffer)
iio_buffer_activate(indio_dev, insert_buffer);
/* If no buffers in list, we are done */ if (list_empty(&iio_dev_opaque->buffer_list)) return 0;
ret = iio_enable_buffers(indio_dev, &new_config); if (ret) goto err_deactivate_all;
return 0;
err_deactivate_all: /* * We've already verified that the config is valid earlier. If things go * wrong in either enable or disable the most likely reason is an IO * error from the device. In this case there is no good recovery * strategy. Just make sure to disable everything and leave the device * in a sane state. With a bit of luck the device might come back to * life again later and userspace can try again.
*/
iio_buffer_deactivate_all(indio_dev);
ret = kstrtobool(buf, &requested_state); if (ret < 0) return ret;
guard(mutex)(&iio_dev_opaque->mlock);
/* Find out if it is in the list */
inlist = iio_buffer_is_active(buffer); /* Already in desired state */ if (inlist == requested_state) return len;
if (requested_state)
ret = __iio_update_buffers(indio_dev, buffer, NULL); else
ret = __iio_update_buffers(indio_dev, NULL, buffer); if (ret) return ret;
/* * When adding new attributes here, put the at the end, at least until * the code that handles the length/length_ro & watermark/watermark_ro * assignments gets cleaned up. Otherwise these can create some weird * duplicate attributes errors under some setups.
*/ staticstruct attribute *iio_buffer_attrs[] = {
&dev_attr_length.attr,
&dev_attr_enable.attr,
&dev_attr_watermark.attr,
&dev_attr_data_available.attr,
&dev_attr_direction.attr,
};
/* * Check whether we already have an attachment for this driver/DMABUF * combo. If we do, refuse to attach.
*/
list_for_each_entry(each, &buffer->dmabufs, entry) { if (each->attach->dev == indio_dev->dev.parent
&& each->attach->dmabuf == dmabuf) { /* * We unlocked the reservation object, so going through * the cleanup code would mean re-locking it first. * At this stage it is simpler to free the attachment * using iio_buffer_dma_put().
*/
mutex_unlock(&buffer->dmabufs_mutex);
iio_buffer_dmabuf_put(attach); return -EBUSY;
}
}
/* Otherwise, add the new attachment to our dmabufs list. */
list_add(&priv->entry, &buffer->dmabufs);
mutex_unlock(&buffer->dmabufs_mutex);
/* Cyclic flag is only supported on output buffers */ if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT) return -EINVAL;
dmabuf = dma_buf_get(iio_dmabuf.fd); if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf);
if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) {
ret = -EINVAL; goto err_dmabuf_put;
}
attach = iio_buffer_find_attachment(ib, dmabuf, nonblock); if (IS_ERR(attach)) {
ret = PTR_ERR(attach); goto err_dmabuf_put;
}
priv = attach->importer_priv;
fence = kmalloc(sizeof(*fence), GFP_KERNEL); if (!fence) {
ret = -ENOMEM; goto err_attachment_put;
}
fence->priv = priv;
seqno = atomic_add_return(1, &priv->seqno);
/* * The transfers are guaranteed to be processed in the order they are * enqueued, so we can use a simple incrementing sequence number for * the dma_fence.
*/
dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops,
&priv->lock, priv->context, seqno);
ret = iio_dma_resv_lock(dmabuf, nonblock); if (ret) goto err_fence_put;
ret = buffer->access->enqueue_dmabuf(buffer, priv->block, &fence->base,
priv->sgt, iio_dmabuf.bytes_used,
cyclic); if (ret) { /* * DMABUF enqueue failed, but we already added the fence. * Signal the error through the fence completion mechanism.
*/
iio_buffer_signal_dmabuf_done(&fence->base, ret);
}
if (buffer->access->unlock_queue)
buffer->access->unlock_queue(buffer);
/* * The fence will be unref'd in iio_buffer_cleanup. * It can't be done here, as the unref functions might try to lock the * resv object, which can deadlock.
*/
INIT_WORK(&iio_fence->work, iio_buffer_cleanup);
schedule_work(&iio_fence->work);
}
EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done);
if (copy_from_user(&idx, ival, sizeof(idx))) return -EFAULT;
if (idx >= iio_dev_opaque->attached_buffers_cnt) return -ENODEV;
iio_device_get(indio_dev);
buffer = iio_dev_opaque->attached_buffers[idx];
if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
ret = -EBUSY; goto error_iio_dev_put;
}
ib = kzalloc(sizeof(*ib), GFP_KERNEL); if (!ib) {
ret = -ENOMEM; goto error_clear_busy_bit;
}
ib->indio_dev = indio_dev;
ib->buffer = buffer;
fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
ib, O_RDWR | O_CLOEXEC); if (fd < 0) {
ret = fd; goto error_free_ib;
}
if (copy_to_user(ival, &fd, sizeof(fd))) { /* * "Leak" the fd, as there's not much we can do about this * anyway. 'fd' might have been closed already, as * anon_inode_getfd() called fd_install() on it, which made * it reachable by userland. * * Instead of allowing a malicious user to play tricks with * us, rely on the process exit path to do any necessary * cleanup, as in releasing the file, if still needed.
*/ return -EFAULT;
}
buffer_attrcount = 0; if (buffer->attrs) { while (buffer->attrs[buffer_attrcount])
buffer_attrcount++;
}
buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
scan_el_attrcount = 0;
INIT_LIST_HEAD(&buffer->buffer_attr_list);
channels = indio_dev->channels; if (channels) { /* new magic */ for (i = 0; i < indio_dev->num_channels; i++) { conststruct iio_scan_type *scan_type;
if (channels[i].scan_index < 0) continue;
if (channels[i].has_ext_scan_type) { int j;
/* * get_current_scan_type is required when using * extended scan types.
*/ if (!indio_dev->info->get_current_scan_type) {
ret = -EINVAL; goto error_cleanup_dynamic;
}
for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
buffer = iio_dev_opaque->attached_buffers[i];
__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
}
}
/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
				   const unsigned long *mask)
{
	return bitmap_weight(mask, iio_get_masklength(indio_dev)) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
ret = buffer->access->store_to(buffer, dataout); if (ret) return ret;
/* * We can't just test for watermark to decide if we wake the poll queue * because read may request less samples than the watermark.
*/
wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM); return 0;
}
/** * iio_push_to_buffers() - push to a registered buffer. * @indio_dev: iio_dev structure for device. * @data: Full scan.
*/ int iio_push_to_buffers(struct iio_dev *indio_dev, constvoid *data)
{ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); int ret; struct iio_buffer *buf;
list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
ret = iio_push_to_buffer(buf, data); if (ret < 0) return ret;
}
/** * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer, * no alignment or space requirements. * @indio_dev: iio_dev structure for device. * @data: channel data excluding the timestamp. * @data_sz: size of data. * @timestamp: timestamp for the sample data. * * This special variant of iio_push_to_buffers_with_timestamp() does * not require space for the timestamp, or 8 byte alignment of data. * It does however require an allocation on first call and additional * copies on all calls, so should be avoided if possible.
*/ int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev, constvoid *data,
size_t data_sz,
int64_t timestamp)
{ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
/* * Conservative estimate - we can always safely copy the minimum * of either the data provided or the length of the destination buffer. * This relaxed limit allows the calling drivers to be lax about * tracking the size of the data they are pushing, at the cost of * unnecessary copying of padding.
*/
data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz); if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) { void *bb;
/** * iio_buffer_release() - Free a buffer's resources * @ref: Pointer to the kref embedded in the iio_buffer struct * * This function is called when the last reference to the buffer has been * dropped. It will typically free all resources allocated by the buffer. Do not * call this function manually, always use iio_buffer_put() when done using a * buffer.
*/ staticvoid iio_buffer_release(struct kref *ref)
{ struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	/*
	 * NOTE(review): the return and closing brace were truncated in the
	 * garbled source; restored per the function's own kernel-doc, which
	 * states the passed-in pointer is returned.
	 */
	return buffer;
}
/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 *
 * When the last reference is dropped, iio_buffer_release() frees the
 * buffer's resources.
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);
/** * iio_device_attach_buffer - Attach a buffer to a IIO device * @indio_dev: The device the buffer should be attached to * @buffer: The buffer to attach to the device * * Return 0 if successful, negative if error. * * This function attaches a buffer to a IIO device. The buffer stays attached to * the device until the device is freed. For legacy reasons, the first attached * buffer will also be assigned to 'indio_dev->buffer'. * The array allocated here, will be free'd via the iio_device_detach_buffers() * call which is handled by the iio_device_free().
*/ int iio_device_attach_buffer(struct iio_dev *indio_dev, struct iio_buffer *buffer)
{ struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers; unsignedint cnt = iio_dev_opaque->attached_buffers_cnt;
cnt++;
new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL); if (!new) return -ENOMEM;
iio_dev_opaque->attached_buffers = new;
buffer = iio_buffer_get(buffer);
/* first buffer is legacy; attach it to the IIO device directly */ if (!indio_dev->buffer)
indio_dev->buffer = buffer;
/*
 * NOTE(review): the following text is extraneous website boilerplate that was
 * accidentally appended to this source file and should be removed. German
 * original, translated: "The information on this website was carefully
 * compiled to the best of our knowledge. However, neither completeness,
 * correctness, nor quality of the information provided is guaranteed.
 * Remark: the colored syntax highlighting and the measurement are still
 * experimental."
 */