/* * * Find or create vm block based on requested @size. * @size must be page aligned.
* */ staticstruct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsignedint size, struct ct_atc *atc)
{ struct ct_vm_block *block = NULL, *entry; struct list_head *pos;
size = CT_PAGE_ALIGN(size); if (size > vm->size) {
dev_err(atc->card->dev, "Fail! No sufficient device virtual memory space available!\n"); return NULL;
}
mutex_lock(&vm->lock);
list_for_each(pos, &vm->unused) {
entry = list_entry(pos, struct ct_vm_block, list); if (entry->size >= size) break; /* found a block that is big enough */
} if (pos == &vm->unused) goto out;
if (entry->size == size) { /* Move the vm node from unused list to used list directly */
list_move(&entry->list, &vm->used);
vm->size -= size;
block = entry; goto out;
}
block = kzalloc(sizeof(*block), GFP_KERNEL); if (!block) goto out;
block = get_vm_block(vm, size, atc); if (block == NULL) {
dev_err(atc->card->dev, "No virtual memory block that is big enough to allocate!\n"); return NULL;
}
ptp = (unsignedlong *)vm->ptp[0].area;
pte_start = (block->addr >> CT_PAGE_SHIFT);
pages = block->size >> CT_PAGE_SHIFT; for (i = 0; i < pages; i++) { unsignedlong addr;
addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
ptp[pte_start + i] = addr;
}
/*
 * Return the host physical address of the @index-th device page table
 * page on success, or ~0UL on failure.  The first ~0UL returned marks
 * the end of the page-table-page list.
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	dma_addr_t addr;

	if (index >= CT_PTP_NUM)
		addr = ~0UL;
	else
		addr = vm->ptp[index].addr;

	return addr;
}
/*
 * Create and initialize a device virtual memory manager, returning it
 * in @rvm.  Allocates CT_PTP_NUM page table pages via the DMA API and
 * seeds the free list with one block spanning the whole space.
 *
 * Returns 0 on success or a negative error code.
 */
int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  &pci->dev,
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* Unwind inline rather than via ct_vm_destroy(): at this
		 * point the used/unused list heads are still zeroed (not
		 * INIT_LIST_HEAD'ed), so ct_vm_destroy() would walk NULL
		 * pointers, and it would also pass the never-allocated
		 * ptp[] entries to snd_dma_free_pages().  Free only the
		 * @i pages that were actually allocated. */
		for (i--; i >= 0; i--)
			snd_dma_free_pages(&vm->ptp[i]);
		kfree(vm);
		return -ENOMEM;
	}

	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);

	/* Seed the free list with a single block covering everything.
	 * A failure here is tolerated: the manager is still valid, it
	 * just has no allocatable space until blocks are returned. */
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (NULL != block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}
/* The caller must ensure no mapping pages are being used
* by hardware before calling this function */ void ct_vm_destroy(struct ct_vm *vm)
{ int i; struct list_head *pos; struct ct_vm_block *entry;
/* free used and unused list nodes */ while (!list_empty(&vm->used)) {
pos = vm->used.next;
list_del(pos);
entry = list_entry(pos, struct ct_vm_block, list);
kfree(entry);
} while (!list_empty(&vm->unused)) {
pos = vm->unused.next;
list_del(pos);
entry = list_entry(pos, struct ct_vm_block, list);
kfree(entry);
}
/* free allocated page table pages */ for (i = 0; i < CT_PTP_NUM; i++)
snd_dma_free_pages(&vm->ptp[i]);
vm->size = 0;
kfree(vm);
}
/*
 * NOTE(review): removed unrelated German website boilerplate
 * ("Messung V0.5" timing/disclaimer text) that an HTML-extraction tool
 * appended after the end of the source; it was not part of the code
 * and made the file invalid C.
 */