/*
 * search_empty_map_area - search an empty region on the PTB with the
 * given size
 *
 * If an empty region of at least @npages pages is found, return its first
 * PTB page index and store the list position of the next mapped block in
 * @nextp.  If no region is found, return a negative error code.
 *
 * NOTE(review): this chunk is truncated — the search loop body is missing
 * from the visible source; only the local declarations survive here.
 */ staticint search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct list_head **nextp)
{ int page = 1, found_page = -ENOMEM; int max_size = npages; int size; struct list_head *candidate = &emu->mapped_link_head; struct list_head *pos;
/*
 * map_memblk - map a memory block onto emu10k1's PTB
 *
 * Finds an empty PTB region large enough for @blk, links the block into
 * the mapped list and the mapped-order (LRU) list, and fills the PTB
 * entries for every page of the block.
 *
 * Call with memblk_lock held.  Returns 0 on success, a negative error
 * code otherwise.
 */
static int map_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	struct list_head *insert_before;
	int start, idx, ptb;

	start = search_empty_map_area(emu, blk->pages, &insert_before);
	if (start < 0) /* no sufficiently large empty region */
		return start;
	if (start == 0) {
		/* page 0 is reserved (silent page); mapping it is a bug */
		dev_err(emu->card->dev, "trying to map zero (reserved) page\n");
		return -EINVAL;
	}

	/* insert this block at the proper position of the mapped list */
	list_add_tail(&blk->mapped_link, insert_before);
	/* append this as the newest block in the order list */
	list_add_tail(&blk->mapped_order_link, &emu->mapped_order_link_head);
	blk->mapped_page = start;

	/* fill the PTB entries covering the block's pages */
	ptb = start;
	for (idx = blk->first_page; idx <= blk->last_page; idx++)
		set_ptb_entry(emu, ptb++, emu->page_addr_table[idx]);

	return 0;
}
/*
 * unmap_memblk - unmap the block from the PTB
 *
 * Returns the size (in pages) of the resulting empty region.
 * Call with memblk_lock held.
 *
 * NOTE(review): this chunk is truncated — the code that clears the PTB
 * entries and removes the block from the lists is missing below; only
 * the empty-region size calculation is visible here.
 */ staticint unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{ int start_page, end_page, mpage, pg; struct list_head *p; struct snd_emu10k1_memblk *q;
/* calculate the expected size of the empty region after unmapping:
 * start_page = end of the previous mapped block, or page 1 (page 0 is
 * reserved) if this block is the first mapped one */
p = blk->mapped_link.prev; if (p != &emu->mapped_link_head) {
q = get_emu10k1_memblk(p, mapped_link);
start_page = q->mapped_page + q->pages;
} else {
start_page = 1;
}
/* end_page = first page of the following mapped block, or the end of
 * the PTB (depends on 31/32-bit address mode) if none follows */
p = blk->mapped_link.next; if (p != &emu->mapped_link_head) {
q = get_emu10k1_memblk(p, mapped_link);
end_page = q->mapped_page;
} else {
end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
}
/*
 * search_empty - search empty pages of the given byte size and create a
 * memory block for them
 *
 * Unlike synth_alloc, the memory block is aligned to the PTB page start.
 * Returns the new block, or NULL on allocation failure.
 *
 * NOTE(review): this chunk is truncated — the loop that scans the block
 * list and sets @p, @page and @psize before the __found_pages label is
 * missing from the visible source.
 */ staticstruct snd_emu10k1_memblk *
search_empty(struct snd_emu10k1 *emu, int size)
{ struct list_head *p; struct snd_emu10k1_memblk *blk; int page, psize;
__found_pages: /* create a new memory block covering psize pages */
/* NOTE(review): p/page/psize are produced by the missing search loop above */
blk = (struct snd_emu10k1_memblk *)__snd_util_memblk_new(emu->memhdr, psize << PAGE_SHIFT, p->prev); if (blk == NULL) return NULL;
blk->mem.offset = aligned_page_offset(page); /* set page-aligned offset */
emu10k1_memblk_init(blk); return blk;
}
/*
 * is_valid_page - check if the given DMA address is valid for the PTB
 *
 * Returns 1 if @addr fits within the card's DMA mask and is aligned to
 * EMUPAGESIZE, 0 otherwise (logging a rate-limited error).
 *
 * Fix: the source had whitespace-mangled tokens ("staticint",
 * "unsignedlong") which do not compile; restored "static int" and
 * "unsigned long".
 */
static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr)
{
	if (addr & ~emu->dma_mask) {
		dev_err_ratelimited(emu->card->dev,
				    "max memory size is 0x%lx (addr = 0x%lx)!!\n",
				    emu->dma_mask, (unsigned long)addr);
		return 0;
	}
	if (addr & (EMUPAGESIZE-1)) {
		dev_err_ratelimited(emu->card->dev, "page is not aligned\n");
		return 0;
	}
	return 1;
}
/*
 * snd_emu10k1_memblk_map - map the given memory block on the PTB
 *
 * If the block is already mapped, only update its position in the order
 * (LRU) list.  If no empty region is found, unmap unused (not map_locked)
 * blocks, oldest first, until enough space is freed, then retry the
 * mapping.  Returns 0 on success or a negative error code.
 *
 * Fix: the source had the whitespace-mangled token "unsignedlong flags"
 * which does not compile; restored "unsigned long flags".
 */
int snd_emu10k1_memblk_map(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
{
	int err;
	int size;
	struct list_head *p, *nextp;
	struct snd_emu10k1_memblk *deleted;
	unsigned long flags;

	spin_lock_irqsave(&emu->memblk_lock, flags);
	if (blk->mapped_page >= 0) {
		/* already mapped: just refresh the order link */
		list_move_tail(&blk->mapped_order_link,
			       &emu->mapped_order_link_head);
		spin_unlock_irqrestore(&emu->memblk_lock, flags);
		return 0;
	}
	err = map_memblk(emu, blk);
	if (err < 0) {
		/* not enough pages - try to unmap some blocks,
		 * starting from the oldest block */
		p = emu->mapped_order_link_head.next;
		for (; p != &emu->mapped_order_link_head; p = nextp) {
			nextp = p->next; /* save: unmap removes p from list */
			deleted = get_emu10k1_memblk(p, mapped_order_link);
			if (deleted->map_locked)
				continue; /* pinned blocks must stay mapped */
			size = unmap_memblk(emu, deleted);
			if (size >= blk->pages) {
				/* the empty region is now large enough */
				err = map_memblk(emu, blk);
				break;
			}
		}
	}
	spin_unlock_irqrestore(&emu->memblk_lock, flags);
	return err;
}
/*
 * NOTE(review): the signature of this function is missing from the
 * visible source.  From the body it allocates a PCM buffer's pages and
 * maps them onto the PTB (presumably snd_emu10k1_alloc_pages() — confirm
 * against the full file); it returns a (struct snd_util_memblk *) or
 * NULL on failure.
 */
if (snd_BUG_ON(!emu)) return NULL; if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE)) return NULL;
hdr = emu->memhdr; if (snd_BUG_ON(!hdr)) return NULL;
mutex_lock(&hdr->block_mutex);
/* find a page-aligned empty region large enough for the buffer */
blk = search_empty(emu, runtime->dma_bytes); if (blk == NULL) {
mutex_unlock(&hdr->block_mutex); return NULL;
} /* fill buffer addresses but pointers are not stored so that
 * snd_free_pci_page() is not called in synth_free()
 */
idx = 0; for (page = blk->first_page; page <= blk->last_page; page++, idx++) { unsignedlong ofs = idx << PAGE_SHIFT;
dma_addr_t addr; if (ofs >= runtime->dma_bytes)
/* past the end of the buffer: map the silent page instead */
addr = emu->silent_page.addr; else
addr = snd_pcm_sgbuf_get_addr(substream, ofs); if (! is_valid_page(emu, addr)) {
dev_err_ratelimited(emu->card->dev, "emu: failure page = %d\n", idx);
mutex_unlock(&hdr->block_mutex); return NULL;
}
emu->page_addr_table[page] = addr;
emu->page_ptr_table[page] = NULL; /* no kernel pointer stored on purpose */
}
/* set PTB entries */
blk->map_locked = 1; /* do not unmap this block! */
err = snd_emu10k1_memblk_map(emu, blk); if (err < 0) {
__snd_util_mem_free(hdr, (struct snd_util_memblk *)blk);
mutex_unlock(&hdr->block_mutex); return NULL;
}
mutex_unlock(&hdr->block_mutex); return (struct snd_util_memblk *)blk;
}
/*
 * snd_emu10k1_free_pages - release a DMA buffer from the page table
 *
 * Thin wrapper: sanity-checks its arguments and delegates to
 * snd_emu10k1_synth_free().  Returns 0 or a negative error code.
 */
int snd_emu10k1_free_pages(struct snd_emu10k1 *emu, struct snd_util_memblk *blk)
{
	if (snd_BUG_ON(!(emu && blk)))
		return -EINVAL;
	return snd_emu10k1_synth_free(emu, blk);
}
/*
 * snd_emu10k1_alloc_pages_maybe_wider - allocate DMA pages, widening the
 * allocation if necessary
 *
 * See the comment above snd_emu10k1_detect_iommu() in emu10k1_main.c for
 * why this widening might be needed.
 *
 * If you modify this function check whether __synth_free_pages() also
 * needs changes.
 *
 * NOTE(review): this chunk is truncated — the actual allocation call
 * after the size adjustment is missing from the visible source.
 */ int snd_emu10k1_alloc_pages_maybe_wider(struct snd_emu10k1 *emu, size_t size, struct snd_dma_buffer *dmab)
{ if (emu->iommu_workaround) {
size_t npages = DIV_ROUND_UP(size, PAGE_SIZE);
size_t size_real = npages * PAGE_SIZE;
/*
 * The device has been observed to access up to 256 extra bytes,
 * but use 1k to be safe.
 */ if (size_real < size + 1024)
size += PAGE_SIZE;
}
/*
 * NOTE(review): fragment of a per-page free loop (apparently the body of
 * __synth_free_pages() — the enclosing function signature is not visible
 * here).  It rebuilds a snd_dma_buffer descriptor for each allocated page
 * so that page can be released.
 */
for (page = first_page; page <= last_page; page++) { if (emu->page_ptr_table[page] == NULL) continue;
dmab.area = emu->page_ptr_table[page];
dmab.addr = emu->page_addr_table[page];
/*
 * please keep me in sync with the logic in
 * snd_emu10k1_alloc_pages_maybe_wider()
 */
dmab.bytes = PAGE_SIZE; if (emu->iommu_workaround)
/* allocation was widened by one page — free the wider size too */
dmab.bytes *= 2;
/*
 * NOTE(review): the following disclaimer is extraneous scraped web-page
 * text, not part of the driver source; it is preserved here (translated
 * from German) inside a comment so the file remains well-formed C:
 * "The information on this web page was compiled carefully to the best
 * of our knowledge.  However, neither completeness, nor correctness, nor
 * quality of the provided information is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */