/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Cisco Systems. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE.
*/
/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
MTHCA_ICM_ALLOC_SIZE = 1 << 18,	/* 1 << 18 = 256 KB per ICM allocation */
MTHCA_TABLE_CHUNK_SIZE = 1 << 18	/* each ICM table chunk maps 256 KB */
};
/*
 * Free the pages backing one non-coherent ICM chunk: undo the DMA
 * mapping (if one was made) and release each page allocation.
 *
 * NOTE: the original text read "staticvoid" (missing space), which
 * does not compile; fixed to "static void".
 */
static void mthca_free_icm_pages(struct mthca_dev *dev,
				 struct mthca_icm_chunk *chunk)
{
	int i;

	/* Only chunks that were actually DMA-mapped have nsg > 0. */
	if (chunk->nsg > 0)
		dma_unmap_sg(&dev->pdev->dev, chunk->mem, chunk->npages,
			     DMA_BIDIRECTIONAL);

	/* Each scatterlist entry holds one higher-order page allocation. */
	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}
/*
 * Free the pages backing one coherent ICM chunk: each scatterlist
 * entry was obtained from dma_alloc_coherent(), so release it with
 * dma_free_coherent() using the stored DMA address.
 *
 * NOTE: the original text read "staticvoid" (missing space), which
 * does not compile; fixed to "static void".
 */
static void mthca_free_icm_coherent(struct mthca_dev *dev,
				    struct mthca_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
	}
}
/*
 * Use __GFP_ZERO because buggy firmware assumes ICM pages are
 * cleared, and subtle failures are seen if they aren't.
 */
/*
 * NOTE(review): this is a fragment of an ICM chunk-allocation loop
 * (presumably mthca_alloc_icm); its enclosing function signature and
 * surrounding lines are not visible here — they appear to have been
 * dropped by extraction. Left byte-identical.
 */
page = alloc_pages(gfp_mask | __GFP_ZERO, order); if (!page) return -ENOMEM;
/* Coherent and non-coherent chunks are filled by different helpers. */
if (coherent)
ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
&chunk->mem[chunk->npages],
cur_order, gfp_mask); else
ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
cur_order, gfp_mask);
/*
 * Take a reference on the ICM chunk that backs object number @obj of
 * @table, allocating and mapping the chunk on first use.
 *
 * NOTE(review): the body below is truncated/fused by extraction —
 * after the refcount fast path it jumps into a chunk-scanning loop
 * (iterating icm->chunk_list with dma_handle/offset locals) that
 * belongs to a different function (it reads like mthca_table_find).
 * Left byte-identical.
 */
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{ int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; int ret = 0;
mutex_lock(&table->mutex);
/* Fast path: chunk already present — just bump its refcount. */
if (table->icm[i]) {
++table->icm[i]->refcount; goto out;
}
list_for_each_entry(chunk, &icm->chunk_list, list) { for (i = 0; i < chunk->npages; ++i) { if (dma_handle && dma_offset >= 0) { if (sg_dma_len(&chunk->mem[i]) > dma_offset)
*dma_handle = sg_dma_address(&chunk->mem[i]) +
dma_offset;
dma_offset -= sg_dma_len(&chunk->mem[i]);
} /* DMA mapping can merge pages but not split them, * so if we found the page, dma_handle has already
 * been assigned to. */ if (chunk->mem[i].length > offset) {
page = sg_page(&chunk->mem[i]); goto out;
}
offset -= chunk->mem[i].length;
}
}
/*
 * Take a reference on every ICM table chunk covering objects
 * [start, end]. On failure, drop the references already taken and
 * return the error, leaving the table unchanged.
 */
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int step = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int obj;
	int err;

	for (obj = start; obj <= end; obj += step) {
		err = mthca_table_get(dev, table, obj);
		if (err) {
			/* Unwind the chunks referenced so far. */
			while (obj > start) {
				obj -= step;
				mthca_table_put(dev, table, obj);
			}
			return err;
		}
	}

	return 0;
}
/*
 * Drop one reference on every ICM table chunk covering objects
 * [start, end]. No-op on devices that are not mem-free.
 */
void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int step;
	int obj;

	if (!mthca_is_memfree(dev))
		return;

	step = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	for (obj = start; obj <= end; obj += step)
		mthca_table_put(dev, table, obj);
}
/*
 * Allocate an ICM table describing @nobj objects of @obj_size bytes
 * mapped at device-virtual address @virt.
 *
 * NOTE(review): the main body of this function (table allocation,
 * per-chunk ICM allocation and mapping) is missing between the local
 * declarations and the reserved-chunk refcount below — extraction
 * appears to have dropped those lines. Only the tail (success return
 * and error-unwind path) survives. Left byte-identical.
 */
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
u64 virt, int obj_size, int nobj, int reserved, int use_lowmem, int use_coherent)
{ struct mthca_icm_table *table; int obj_per_chunk; int num_icm; unsigned chunk_size; int i;
/* * Add a reference to this ICM chunk so that it never * gets freed (since it contains reserved firmware objects).
 */
++table->icm[i]->refcount;
}
return table;
/* Error path: unmap and free every chunk allocated so far. */
err: for (i = 0; i < num_icm; ++i) if (table->icm[i]) {
mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
mthca_free_icm(dev, table->icm[i], table->coherent);
}
kfree(table);
return NULL;
}
/*
 * Tear down an ICM table: unmap each present chunk from the HCA and
 * free its memory.
 *
 * NOTE(review): the trailing frees (of the table structure itself)
 * and the closing brace appear to have been dropped by extraction —
 * the text runs straight into the next definition. Left byte-identical.
 */
void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{ int i;
for (i = 0; i < table->num_icm; ++i) if (table->icm[i]) {
mthca_UNMAP_ICM(dev,
table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
mthca_free_icm(dev, table->icm[i], table->coherent);
}
/*
 * NOTE(review): this region fuses two functions. It opens with the
 * signature and guard checks of mthca_map_user_db (map a userspace
 * doorbell page into UARC), but the loop that follows — unmapping,
 * DMA-unmapping and unpinning every page, then kfree(db_tab) — reads
 * like the body of a cleanup routine (mthca_cleanup_user_db_tab).
 * Extraction appears to have dropped the lines in between.
 * Left byte-identical.
 */
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{ struct page *pages[1]; int ret = 0; int i;
if (!mthca_is_memfree(dev)) return 0;
/* Doorbell index must fall within the UARC region (8 bytes per record). */
if (index < 0 || index > dev->uar_table.uarc_size / 8) return -EINVAL;
for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) { if (db_tab->page[i].uvirt) {
mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
dma_unmap_sg(&dev->pdev->dev, &db_tab->page[i].mem, 1,
DMA_TO_DEVICE);
unpin_user_page(sg_page(&db_tab->page[i].mem));
}
}
kfree(db_tab);
}
/*
 * Allocate a doorbell record of the given @type for queue number @qn,
 * returning a pointer to it in *@db.
 *
 * Doorbell pages are carved from two groups growing toward each other:
 * group 0 (CQ arm / SQ records) grows up from page 0, group 1
 * (CQ set-CI / RQ / SRQ records) grows down from the last page.
 *
 * NOTE(review): the body below is truncated/fused by extraction —
 * the "found:" label and record-assignment code are missing, and after
 * the MAP_ICM_page error path the text jumps into what reads like the
 * tail of mthca_free_db (the max_group1/min_group2 shrink logic and a
 * stray closing brace). Left byte-identical.
 */
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
u32 qn, __be32 **db)
{ int group; int start, end, dir; int i, j; struct mthca_db_page *page; int ret = 0;
mutex_lock(&dev->db_tab->mutex);
/* Pick the group and scan direction based on the record type. */
switch (type) { case MTHCA_DB_TYPE_CQ_ARM: case MTHCA_DB_TYPE_SQ:
group = 0;
start = 0;
end = dev->db_tab->max_group1;
dir = 1; break;
case MTHCA_DB_TYPE_CQ_SET_CI: case MTHCA_DB_TYPE_RQ: case MTHCA_DB_TYPE_SRQ:
group = 1;
start = dev->db_tab->npages - 1;
end = dev->db_tab->min_group2;
dir = -1; break;
default:
ret = -EINVAL; goto out;
}
/* First pass: reuse an allocated page with a free record slot. */
for (i = start; i != end; i += dir) if (dev->db_tab->page[i].db_rec &&
!bitmap_full(dev->db_tab->page[i].used,
MTHCA_DB_REC_PER_PAGE)) {
page = dev->db_tab->page + i; goto found;
}
/* Second pass: reuse a page slot that has no backing allocation yet. */
for (i = start; i != end; i += dir) if (!dev->db_tab->page[i].db_rec) {
page = dev->db_tab->page + i; goto alloc;
}
/* No room left: the two groups would collide. */
if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
ret = -ENOMEM; goto out;
}
/* Grow this group by one page. */
if (group == 0)
++dev->db_tab->max_group1; else
--dev->db_tab->min_group2;
page = dev->db_tab->page + end;
alloc:
/* Allocate a coherent doorbell page and map it into UARC. */
page->db_rec = dma_alloc_coherent(&dev->pdev->dev,
MTHCA_ICM_PAGE_SIZE, &page->mapping,
GFP_KERNEL); if (!page->db_rec) {
ret = -ENOMEM; goto out;
}
ret = mthca_MAP_ICM_page(dev, page->mapping,
mthca_uarc_virt(dev, &dev->driver_uar, i)); if (ret) {
dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
page->db_rec, page->mapping); goto out;
}
if (i == dev->db_tab->max_group1) {
--dev->db_tab->max_group1; /* XXX may be able to unmap more pages now */
} if (i == dev->db_tab->min_group2)
++dev->db_tab->min_group2;
}
mutex_unlock(&dev->db_tab->mutex);
}
int mthca_init_db_tab(struct mthca_dev *dev)
{ int i;
if (!mthca_is_memfree(dev)) return 0;
dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL); if (!dev->db_tab) return -ENOMEM;
for (i = 0; i < dev->db_tab->npages; ++i)
dev->db_tab->page[i].db_rec = NULL;
return 0;
}
/*
 * Sweep the doorbell table at teardown, warning about pages whose
 * record bitmaps are not empty.
 *
 * NOTE(review): the function is cut off after the warning — the code
 * that actually unmaps and frees each leftover page, plus the closing
 * braces, is missing; the file then trails into non-source text.
 * Left byte-identical.
 */
void mthca_cleanup_db_tab(struct mthca_dev *dev)
{ int i;
if (!mthca_is_memfree(dev)) return;
/*
 * Because we don't always free our UARC pages when they
 * become empty to make mthca_free_db() simpler we need to
 * make a sweep through the doorbell pages and free any
 * leftover pages now.
 */ for (i = 0; i < dev->db_tab->npages; ++i) { if (!dev->db_tab->page[i].db_rec) continue;
if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
mthca_warn(dev, "Kernel UARC page %d not empty\n", i);
/*
 * NOTE(review): the German text below is website-disclaimer residue
 * from the tool that extracted this file — it is not part of the
 * driver source and should be removed. Preserved here in translation:
 * "The information on this website was carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the provided information is guaranteed.
 * Note: the colored syntax display and the measurement are still
 * experimental."
 */