/**
 * enum iio_block_state - State of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
 */
enum iio_block_state {
	IIO_BLOCK_STATE_QUEUED,
	IIO_BLOCK_STATE_ACTIVE,
	IIO_BLOCK_STATE_DONE,
	IIO_BLOCK_STATE_DEAD,
};
/**
 * struct iio_dma_buffer_block - IIO buffer block
 * @head: List head
 * @size: Total size of the block in bytes
 * @bytes_used: Number of bytes that contain valid data
 * @vaddr: Virtual address of the blocks memory
 * @phys_addr: Physical address of the blocks memory
 * @queue: Parent DMA buffer queue
 * @kref: kref used to manage the lifetime of block
 * @state: Current state of the block
 *
 * NOTE(review): the original kernel-doc also documents @cyclic, @fileio,
 * @sg_table and @fence, but those members (and the struct's closing brace)
 * are not visible in this chunk -- presumably lost in extraction. Verify
 * against the full header before relying on this layout.
 */
struct iio_dma_buffer_block {
	/* May only be accessed by the owner of the block */
	struct list_head head;
	size_t bytes_used;

	/*
	 * Set during allocation, constant thereafter. May be accessed
	 * read-only by anybody holding a reference to the block.
	 */
	void *vaddr;
	dma_addr_t phys_addr;
	size_t size;

	struct iio_dma_buffer_queue *queue;

	/* Must not be accessed outside the core. */
	struct kref kref;
	/*
	 * Must not be accessed outside the core. Access needs to hold
	 * queue->list_lock if the block is not owned by the core.
	 */
	enum iio_block_state state;
/**
 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
 * @blocks: Buffer blocks used for fileio
 * @active_block: Block being used in read()
 * @pos: Read offset in the active block
 * @block_size: Size of each block
 * @next_dequeue: index of next block that will be dequeued
 * @enabled: Whether the buffer is operating in fileio mode
 */
struct iio_dma_buffer_queue_fileio {
	struct iio_dma_buffer_block *blocks[2];
	struct iio_dma_buffer_block *active_block;
	size_t pos;
	size_t block_size;

	/* was "unsignedint" (missing space) -- a syntax error; fixed */
	unsigned int next_dequeue;
	bool enabled;
};
/** * struct iio_dma_buffer_queue - DMA buffer base structure * @buffer: IIO buffer base structure * @dev: Parent device * @ops: DMA buffer callbacks * @lock: Protects the incoming list, active and the fields in the fileio * substruct * @list_lock: Protects lists that contain blocks which can be modified in * atomic context as well as blocks on those lists. This is the outgoing queue * list and typically also a list of active blocks in the part that handles * the DMA controller * @incoming: List of buffers on the incoming queue * @active: Whether the buffer is currently active * @num_dmabufs: Total number of DMABUFs attached to this queue * @fileio: FileIO state
*/ struct iio_dma_buffer_queue { struct iio_buffer buffer; struct device *dev; conststruct iio_dma_buffer_ops *ops;
/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called when a block is submitted to the DMA controller
 * @abort: Should abort all pending transfers
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};
/*
 * (Unrelated boilerplate from the hosting web page, kept for provenance;
 * it is not part of the header itself. Wrapped in a comment so the file
 * remains valid C.)
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch
 * Richtigkeit noch Qualität der bereitgestellten Informationen zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */