if (WARN_ON_ONCE(!ops || !ops->alloc)) return NULL; return ops->alloc(dmab, size);
}
/** * snd_dma_alloc_dir_pages - allocate the buffer area according to the given * type and direction * @type: the DMA buffer type * @device: the device pointer * @dir: DMA direction * @size: the buffer size to allocate * @dmab: buffer allocation record to store the allocated data * * Calls the memory-allocator function for the corresponding * buffer type. * * Return: Zero if the buffer with the given size is allocated successfully, * otherwise a negative value on error.
*/ int snd_dma_alloc_dir_pages(int type, struct device *device, enum dma_data_direction dir, size_t size, struct snd_dma_buffer *dmab)
{ if (WARN_ON(!size)) return -ENXIO; if (WARN_ON(!dmab)) return -ENXIO;
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the
 *	given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding buffer type.
 * When no space is left, this function reduces the size and tries to
 * allocate again.  The size actually allocated may therefore be smaller
 * than the requested @size; it is recorded in @dmab.
 *
 * Return: Zero if a buffer was allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		/* give up on any error other than out-of-memory */
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		/* halve the request, then round up to a whole page order */
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
/** * snd_dma_free_pages - release the allocated buffer * @dmab: the buffer allocation record to release * * Releases the allocated buffer via snd_dma_alloc_pages().
*/ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{ conststruct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
if (ops && ops->free)
ops->free(dmab);
}
EXPORT_SYMBOL(snd_dma_free_pages);
/* devres release callback: frees the DMA buffer attached to the device */
static void __snd_release_pages(struct device *dev, void *res)
{
	snd_dma_free_pages(res);
}
/** * snd_devm_alloc_dir_pages - allocate the buffer and manage with devres * @dev: the device pointer * @type: the DMA buffer type * @dir: DMA direction * @size: the buffer size to allocate * * Allocate buffer pages depending on the given type and manage using devres. * The pages will be released automatically at the device removal. * * Unlike snd_dma_alloc_pages(), this function requires the real device pointer, * hence it can't work with SNDRV_DMA_TYPE_CONTINUOUS or * SNDRV_DMA_TYPE_VMALLOC type. * * Return: the snd_dma_buffer object at success, or NULL if failed
*/ struct snd_dma_buffer *
snd_devm_alloc_dir_pages(struct device *dev, int type, enum dma_data_direction dir, size_t size)
{ struct snd_dma_buffer *dmab; int err;
if (WARN_ON(type == SNDRV_DMA_TYPE_CONTINUOUS ||
type == SNDRV_DMA_TYPE_VMALLOC)) return NULL;
dmab = devres_alloc(__snd_release_pages, sizeof(*dmab), GFP_KERNEL); if (!dmab) return NULL;
/** * snd_dma_buffer_mmap - perform mmap of the given DMA buffer * @dmab: buffer allocation information * @area: VM area information * * Return: zero if successful, or a negative error code
*/ int snd_dma_buffer_mmap(struct snd_dma_buffer *dmab, struct vm_area_struct *area)
{ conststruct snd_malloc_ops *ops;
if (!dmab) return -ENOENT;
ops = snd_dma_get_ops(dmab); if (ops && ops->mmap) return ops->mmap(dmab, area); else return -ENOENT;
}
EXPORT_SYMBOL(snd_dma_buffer_mmap);
#ifdef CONFIG_HAS_DMA
/**
 * snd_dma_buffer_sync - sync DMA buffer between CPU and device
 * @dmab: buffer allocation information
 * @mode: sync mode
 */
void snd_dma_buffer_sync(struct snd_dma_buffer *dmab,
			 enum snd_dma_sync_mode mode)
{
	const struct snd_malloc_ops *ops;

	/* only buffers flagged as needing a sync are processed */
	if (!dmab || !dmab->dev.need_sync)
		return;
	ops = snd_dma_get_ops(dmab);
	if (ops && ops->sync)
		ops->sync(dmab, mode);
}
EXPORT_SYMBOL_GPL(snd_dma_buffer_sync);
#endif /* CONFIG_HAS_DMA */
/** * snd_sgbuf_get_addr - return the physical address at the corresponding offset * @dmab: buffer allocation information * @offset: offset in the ring buffer * * Return: the physical address
*/
dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab, size_t offset)
{ conststruct snd_malloc_ops *ops = snd_dma_get_ops(dmab);
if (dev->of_node) {
pool = of_gen_pool_get(dev->of_node, "iram", 0); /* Assign the pool into private_data field */
dmab->private_data = pool;
p = gen_pool_dma_alloc_align(pool, size, &dmab->addr, PAGE_SIZE); if (p) return p;
}
/* Internal memory might have limited size and no enough space, * so if we fail to malloc, try to fetch memory traditionally.
*/
dmab->dev.type = SNDRV_DMA_TYPE_DEV; return __snd_dma_alloc_pages(dmab, size);
}
#ifdef CONFIG_SND_DMA_SGBUF
/* Fallback SG-buffer allocations for x86 */
struct snd_dma_sg_fallback {
	struct sg_table sgt;	/* used by get_addr - must be the first item */
	size_t count;		/* NOTE(review): presumably the number of entries below — confirm against allocator */
	struct page **pages;	/* allocated page array */
	unsigned int *npages;	/* per-entry page counts — TODO confirm semantics */
};
/* Allocate an SG buffer: prefer the standard DMA API path, fall back to
 * the manual page-based SG allocation when that fails.
 */
static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
{
	int type = dmab->dev.type;
	void *p;

	/* try the standard DMA API allocation at first */
	if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
		dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;
	else
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
	p = __snd_dma_alloc_pages(dmab, size);
	if (p)
		return p;

	dmab->dev.type = type; /* restore the type */
	return snd_dma_sg_fallback_alloc(dmab, size);
}
/*
 * NOTE(review): the following text is not part of the source code; it is
 * web-page boilerplate that was evidently pasted in during extraction.
 * It is preserved here as a comment (translated) so the file stays
 * compilable; it should be removed entirely.
 *
 * "The information on this website was compiled carefully to the best of
 *  our knowledge. However, neither completeness, nor correctness, nor
 *  quality of the information provided is guaranteed.
 *  Note: the colored syntax highlighting and the measurement are still
 *  experimental."
 */