/*
 * NOTE(review): extraction-truncated fragment. Only the kernel-doc header,
 * the signature and the first local declaration of this function survive;
 * the rest of the body is missing. Recover the full definition from the
 * original file before making code changes here.
 */
/** * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a * memory operation * @ctlr: the SPI controller requesting this dma_map() * @op: the memory operation containing the buffer to map * @sgt: a pointer to a non-initialized sg_table that will be filled by this * function * * Some controllers might want to do DMA on the data buffer embedded in @op. * This helper prepares everything for you and provides a ready-to-use * sg_table. This function is not intended to be called from spi drivers. * Only SPI controller drivers should use it. * Note that the caller must ensure the memory region pointed by * op->data.buf.{in,out} is DMA-able before calling this function. * * Return: 0 in case of success, a negative error code otherwise.
*/ int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr, conststruct spi_mem_op *op, struct sg_table *sgt)
{ struct device *dmadev;
/*
 * NOTE(review): extraction-truncated fragment. Only the kernel-doc header,
 * the signature and the first local declaration survive; the body is
 * missing. Recover the full definition before making code changes here.
 */
/** * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a * memory operation * @ctlr: the SPI controller requesting this dma_unmap() * @op: the memory operation containing the buffer to unmap * @sgt: a pointer to an sg_table previously initialized by * spi_controller_dma_map_mem_op_data() * * Some controllers might want to do DMA on the data buffer embedded in @op. * This helper prepares things so that the CPU can access the * op->data.buf.{in,out} buffer again. * * This function is not intended to be called from SPI drivers. Only SPI * controller drivers should use it. * * This function should be called after the DMA operation has finished and is * only valid if the previous spi_controller_dma_map_mem_op_data() call * returned 0. * * Return: 0 in case of success, a negative error code otherwise.
*/ void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr, conststruct spi_mem_op *op, struct sg_table *sgt)
{ struct device *dmadev;
/*
 * NOTE(review): orphaned function tail — the enclosing definition's header
 * is missing from this extraction ('mem', 'op' and 'ctlr' are not declared
 * in the visible lines). The surviving code dispatches to the controller's
 * ->supports_op() hook when one is provided, otherwise falls back to
 * spi_mem_default_supports_op().
 */
if (ctlr->mem_ops && ctlr->mem_ops->supports_op) return ctlr->mem_ops->supports_op(mem, op);
return spi_mem_default_supports_op(mem, op);
}
/*
 * NOTE(review): corrupted merge. The kernel-doc header and signature of
 * spi_mem_supports_op() below are fused with the body of a separate per-op
 * statistics-accounting routine ('stats', 'len', 'l2len' and 'exec_op_ret'
 * are never declared in the visible lines, and the u64_stats/put_cpu()
 * sequence does not belong to a bool predicate). Recover both original
 * definitions before editing the code.
 */
/** * spi_mem_supports_op() - Check if a memory device and the controller it is * connected to support a specific memory operation * @mem: the SPI memory * @op: the memory operation to check * * Some controllers are only supporting Single or Dual IOs, others might only * support specific opcodes, or it can even be that the controller and device * both support Quad IOs but the hardware prevents you from using it because * only 2 IO lines are connected. * * This function checks whether a specific operation is supported. * * Return: true if @op is supported, false otherwise.
*/ bool spi_mem_supports_op(struct spi_mem *mem, conststruct spi_mem_op *op)
{ /* Make sure the operation frequency is correct before going further */
spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);
/* * We do not have the concept of messages or transfers. Let's consider * that one operation is equivalent to one message and one transfer.
*/
u64_stats_inc(&stats->messages);
u64_stats_inc(&stats->transfers);
/* Use the sum of all lengths as bytes count and histogram value. */
len = op->cmd.nbytes + op->addr.nbytes;
len += op->dummy.nbytes + op->data.nbytes;
u64_stats_add(&stats->bytes, len);
l2len = min(fls(len), SPI_STATISTICS_HISTO_SIZE) - 1;
u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
/* Only account for data bytes as transferred bytes. */ if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
u64_stats_add(&stats->bytes_tx, op->data.nbytes); if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
u64_stats_add(&stats->bytes_rx, op->data.nbytes);
/* * A timeout is not an error, following the same behavior as * spi_transfer_one_message().
*/ if (exec_op_ret == -ETIMEDOUT)
u64_stats_inc(&stats->timedout); elseif (exec_op_ret)
u64_stats_inc(&stats->errors);
u64_stats_update_end(&stats->syncp);
put_cpu();
}
/*
 * NOTE(review): heavily truncated body. The visible code jumps from the
 * controller exec_op() fast path straight into the transfer-buffer
 * allocation of the generic spi_message fallback; the fallback's transfer
 * construction, the spi_sync() call and the buffer release are all missing
 * from this extraction ('tmpbufsize' is used uninitialized only because
 * the code that sets it is gone). Recover the full definition before
 * editing.
 */
/** * spi_mem_exec_op() - Execute a memory operation * @mem: the SPI memory * @op: the memory operation to execute * * Executes a memory operation. * * This function first checks that @op is supported and then tries to execute * it. * * Return: 0 in case of success, a negative error code otherwise.
*/ int spi_mem_exec_op(struct spi_mem *mem, conststruct spi_mem_op *op)
{ unsignedint tmpbufsize, xferpos = 0, totalxferlen = 0; struct spi_controller *ctlr = mem->spi->controller; struct spi_transfer xfers[4] = { }; struct spi_message msg;
u8 *tmpbuf; int ret;
/* Make sure the operation frequency is correct before going further */
spi_mem_adjust_op_freq(mem, (struct spi_mem_op *)op);
if (!spi_mem_internal_supports_op(mem, op)) return -EOPNOTSUPP;
if (ctlr->mem_ops && ctlr->mem_ops->exec_op && !spi_get_csgpiod(mem->spi, 0)) {
ret = spi_mem_access_start(mem); if (ret) return ret;
ret = ctlr->mem_ops->exec_op(mem, op);
spi_mem_access_end(mem);
/* * Some controllers only optimize specific paths (typically the * read path) and expect the core to use the regular SPI * interface in other cases.
*/ if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) {
spi_mem_add_op_stats(ctlr->pcpu_statistics, op, ret);
spi_mem_add_op_stats(mem->spi->pcpu_statistics, op, ret);
/* * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so * we're guaranteed that this buffer is DMA-able, as required by the * SPI layer.
*/
tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA); if (!tmpbuf) return -ENOMEM;
if (msg.actual_length != totalxferlen) return -EIO;
return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);
/*
 * NOTE(review): truncated definition. The visible code ends inside the
 * !exec_op fallback path — the clamping of op->data.nbytes against the
 * controller/message size limits and the final return are missing from
 * this extraction. Recover the full definition before editing.
 */
/** * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to * match controller limitations * @mem: the SPI memory * @op: the operation to adjust * * Some controllers have FIFO limitations and must split a data transfer * operation into multiple ones, others require a specific alignment for * optimized accesses. This function allows SPI mem drivers to split a single * operation into multiple sub-operations when required. * * Return: a negative error code if the controller can't properly adjust @op, * 0 otherwise. Note that @op->data.nbytes will be updated if @op * can't be handled in a single step.
*/ int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{ struct spi_controller *ctlr = mem->spi->controller;
size_t len;
if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size) return ctlr->mem_ops->adjust_op_size(mem, op);
if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
if (len > spi_max_transfer_size(mem->spi)) return -EINVAL;
/**
 * spi_mem_adjust_op_freq() - Adjust the frequency of a SPI mem operation to
 *			      match controller, PCB and chip limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some chips have per-op frequency limitations and must adapt the maximum
 * speed. This function allows SPI mem drivers to set @op->max_freq to the
 * maximum supported value.
 */
void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op)
{
	/* A non-zero limit already within the device's maximum speed stands. */
	if (op->max_freq && op->max_freq <= mem->spi->max_speed_hz)
		return;

	/* Unset (zero) or too-high limits are clamped to the device maximum. */
	op->max_freq = mem->spi->max_speed_hz;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_freq);
/*
 * NOTE(review): truncated definition. Only the per-cycle period computation
 * survives; the accumulation into 'ncycles' (which is declared but never
 * updated in the visible lines), the duration computation and the return
 * statement are missing from this extraction. Recover the full definition
 * before editing.
 */
/** * spi_mem_calc_op_duration() - Derives the theoretical length (in ns) of an * operation. This helps finding the best variant * among a list of possible choices. * @mem: the SPI memory * @op: the operation to benchmark * * Some chips have per-op frequency limitations, PCBs usually have their own * limitations as well, and controllers can support dual, quad or even octal * modes, sometimes in DTR. All these combinations make it impossible to * statically list the best combination for all situations. If we want something * accurate, all these combinations should be rated (eg. with a time estimate) * and the best pick should be taken based on these calculations. * * Returns a ns estimate for the time this op would take, except if no * frequency limit has been set, in this case we return the number of * cycles nevertheless to allow callers to distinguish which operation * would be the fastest at iso-frequency.
*/
u64 spi_mem_calc_op_duration(struct spi_mem *mem, struct spi_mem_op *op)
{
u64 ncycles = 0;
u64 ps_per_cycles, duration;
spi_mem_adjust_op_freq(mem, op);
if (op->max_freq) {
/* 10^12 ps per second / frequency = picoseconds per clock cycle */
ps_per_cycles = 1000000000000ULL;
do_div(ps_per_cycles, op->max_freq);
} else { /* In this case, the unit is no longer a time unit */
ps_per_cycles = 1;
}
/*
 * Fallback read path used when the controller provides no direct-mapping
 * support: build a spi_mem_op from the dirmap template and execute it
 * through the regular spi_mem_exec_op() path.
 */
static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int err;

	/* Translate the dirmap-relative offset into an absolute address. */
	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;

	err = spi_mem_adjust_op_size(desc->mem, &op);
	if (!err)
		err = spi_mem_exec_op(desc->mem, &op);
	if (err)
		return err;

	/* adjust_op_size() may have shrunk the op: report the actual length. */
	return op.data.nbytes;
}
/*
 * Fallback write path used when the controller provides no direct-mapping
 * support: build a spi_mem_op from the dirmap template and execute it
 * through the regular spi_mem_exec_op() path.
 */
static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	/* Translate the dirmap-relative offset into an absolute address. */
	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;

	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	/* adjust_op_size() may have shrunk the op: report the actual length. */
	return op.data.nbytes;
}
/*
 * NOTE(review): truncated definition. The error path that frees 'desc' and
 * the success return of the descriptor pointer are missing from this
 * extraction ('desc' would leak on failure only because the cleanup code is
 * gone). Recover the full definition before editing.
 */
/** * spi_mem_dirmap_create() - Create a direct mapping descriptor * @mem: SPI mem device this direct mapping should be created for * @info: direct mapping information * * This function is creating a direct mapping descriptor which can then be used * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write(). * If the SPI controller driver does not support direct mapping, this function * falls back to an implementation using spi_mem_exec_op(), so that the caller * doesn't have to bother implementing a fallback on his own. * * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
*/ struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem, conststruct spi_mem_dirmap_info *info)
{ struct spi_controller *ctlr = mem->spi->controller; struct spi_mem_dirmap_desc *desc; int ret = -ENOTSUPP;
/* Make sure the number of address cycles is between 1 and 8 bytes. */ if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8) return ERR_PTR(-EINVAL);
/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */ if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA) return ERR_PTR(-EINVAL);
desc = kzalloc(sizeof(*desc), GFP_KERNEL); if (!desc) return ERR_PTR(-ENOMEM);
desc->mem = mem;
desc->info = *info; if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
ret = ctlr->mem_ops->dirmap_create(desc);
if (ret) {
/* No controller dirmap support: fall back to the exec_op() path. */
desc->nodirmap = true; if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
ret = -EOPNOTSUPP; else
ret = 0;
}
/*
 * NOTE(review): truncated definition. The visible code ends right after the
 * controller ->dirmap_destroy() call; the release of the descriptor and the
 * closing brace are missing from this extraction. Recover the full
 * definition before editing.
 */
/** * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor * @desc: the direct mapping descriptor to destroy * * This function destroys a direct mapping descriptor previously created by * spi_mem_dirmap_create().
*/ void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{ struct spi_controller *ctlr = desc->mem->spi->controller;
if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
ctlr->mem_ops->dirmap_destroy(desc);
/*
 * NOTE(review): truncated definition. Only the devres allocation survives;
 * the spi_mem_dirmap_create() call, the devres_add()/devres_free() handling
 * and the return are missing from this extraction. Recover the full
 * definition before editing.
 */
/** * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach * it to a device * @dev: device the dirmap desc will be attached to * @mem: SPI mem device this direct mapping should be created for * @info: direct mapping information * * devm_ variant of the spi_mem_dirmap_create() function. See * spi_mem_dirmap_create() for more details. * * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
*/ struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem, conststruct spi_mem_dirmap_info *info)
{ struct spi_mem_dirmap_desc **ptr, *desc;
ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
GFP_KERNEL); if (!ptr) return ERR_PTR(-ENOMEM);
/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	/*
	 * Drop the matching devres entry; its release callback performs the
	 * actual descriptor teardown.
	 */
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
/*
 * NOTE(review): truncated definition. The visible code ends right after the
 * dispatch if/else chain; the final 'return ret;' and the closing brace are
 * missing from this extraction. Recover the full definition before editing.
 */
/** * spi_mem_dirmap_read() - Read data through a direct mapping * @desc: direct mapping descriptor * @offs: offset to start reading from. Note that this is not an absolute * offset, but the offset within the direct mapping which already has * its own offset * @len: length in bytes * @buf: destination buffer. This buffer must be DMA-able * * This function reads data from a memory device using a direct mapping * previously instantiated with spi_mem_dirmap_create(). * * Return: the amount of data read from the memory device or a negative error * code. Note that the returned size might be smaller than @len, and the caller * is responsible for calling spi_mem_dirmap_read() again when that happens.
*/
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf)
{ struct spi_controller *ctlr = desc->mem->spi->controller;
ssize_t ret;
if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN) return -EINVAL;
if (!len) return 0;
if (desc->nodirmap) {
/* Fallback path: emulate the dirmap read with spi_mem_exec_op(). */
ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
} elseif (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
ret = spi_mem_access_start(desc->mem); if (ret) return ret;
ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);
spi_mem_access_end(desc->mem);
} else {
ret = -ENOTSUPP;
}
/*
 * NOTE(review): truncated definition. The visible code ends right after the
 * dispatch if/else chain; the final 'return ret;' and the closing brace are
 * missing from this extraction. Recover the full definition before editing.
 */
/** * spi_mem_dirmap_write() - Write data through a direct mapping * @desc: direct mapping descriptor * @offs: offset to start writing from. Note that this is not an absolute * offset, but the offset within the direct mapping which already has * its own offset * @len: length in bytes * @buf: source buffer. This buffer must be DMA-able * * This function writes data to a memory device using a direct mapping * previously instantiated with spi_mem_dirmap_create(). * * Return: the amount of data written to the memory device or a negative error * code. Note that the returned size might be smaller than @len, and the caller * is responsible for calling spi_mem_dirmap_write() again when that happens.
*/
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, constvoid *buf)
{ struct spi_controller *ctlr = desc->mem->spi->controller;
ssize_t ret;
if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT) return -EINVAL;
if (!len) return 0;
if (desc->nodirmap) {
/* Fallback path: emulate the dirmap write with spi_mem_exec_op(). */
ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
} elseif (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
ret = spi_mem_access_start(desc->mem); if (ret) return ret;
ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);
spi_mem_access_end(desc->mem);
} else {
ret = -ENOTSUPP;
}
/*
 * NOTE(review): truncated definitions. Only the kernel-doc header, the
 * signature and the local declarations of spi_mem_poll_status() survive;
 * its body is missing. The trailing kernel-doc for
 * spi_mem_driver_register_with_owner() is likewise orphaned — the function
 * it documents is absent from this extraction. Recover both definitions
 * before editing.
 */
/** * spi_mem_poll_status() - Poll memory device status * @mem: SPI memory device * @op: the memory operation to execute * @mask: status bitmask to check * @match: (status & mask) expected value * @initial_delay_us: delay in us before starting to poll * @polling_delay_us: time to sleep between reads in us * @timeout_ms: timeout in milliseconds * * This function polls a status register and returns when * (status & mask) == match or when the timeout has expired. * * Return: 0 in case of success, -ETIMEDOUT in case of error, * -EOPNOTSUPP if not supported.
*/ int spi_mem_poll_status(struct spi_mem *mem, conststruct spi_mem_op *op,
u16 mask, u16 match, unsignedlong initial_delay_us, unsignedlong polling_delay_us,
u16 timeout_ms)
{ struct spi_controller *ctlr = mem->spi->controller; int ret = -EOPNOTSUPP; int read_status_ret;
u16 status;
/** * spi_mem_driver_register_with_owner() - Register a SPI memory driver * @memdrv: the SPI memory driver to register * @owner: the owner of this driver * * Registers a SPI memory driver. * * Return: 0 in case of success, a negative error code otherwise.
*/
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.