// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Arm Limited. All rights reserved.
 *
 * Coresight Address Translation Unit support
 *
 * Author: Suzuki K Poulose <suzuki.poulose@arm.com>
 */

/*
 * CATU uses a page size of 4KB for page tables as well as data pages.
 * Each 64bit entry in the table has the following format.
 *
 *	63			12	1  0
 *	------------------------------------
 *	|	 Address [63-12] | SBZ	| V|
 *	------------------------------------
 *
 * Where bit[0] V indicates if the address is valid or not.
 * Each 4K table pages have upto 256 data page pointers, taking upto 2K
 * size. There are two Link pointers, pointing to the previous and next
 * table pages respectively at the end of the 4K page. (i.e, entry 510
 * and 511).
 *  E.g, a table of two pages could look like :
 *
 *                 Table Page 0       Table Page 1
 * SLADDR ===> x------------------x x--> x-----------------x
 * INADDR    ->|  Page 0      | V |  |   | Page 256    | V | <- INADDR+1M
 *             |------------------|  |   |-----------------|
 * INADDR+4K ->|  Page 1      | V |  |   |                 |
 *             |------------------|  |   |-----------------|
 *             |  Page 2      | V |  |   |                 |
 *             |------------------|  |   |-----------------|
 *             |  ...         | V |  |   |  ...            |
 *             |------------------|  |   |-----------------|
 * INADDR+1020K|  Page 255    | V |  |   |  Page 511   | V |
 * SLADDR+2K==>|------------------|  |   |-----------------|
 *             |  UNUSED      |   |  |   |                 |
 *             |------------------|  |   |                 |
 *             |  UNUSED      |   |  |   |                 |
 *             |------------------|  |   |                 |
 *             |  ...         |   |  |   |                 |
 *             |------------------|  |   |-----------------|
 *             |   IGNORED    | 0 |  |   | Table Page 0| 1 |
 *             |------------------|  |   |-----------------|
 *             |  Table Page 1| 1 |--x   | IGNORED     | 0 |
 *             x------------------x      x-----------------x
 * SLADDR+4K==>
 *
 * The base input address (used by the ETR, programmed in INADDR_{LO,HI})
 * must be aligned to 1MB (the size addressable by a single page table).
 * The CATU maps INADDR{LO:HI} to the first page in the table pointed
 * to by SLADDR{LO:HI} and so on.
 *
 */
/* A single 64-bit CATU table entry: bits[63:12] page address, bit[0] valid. */
typedef u64 cate_t;

/* CATU expects the INADDR to be aligned to 1M. */
#define CATU_DEFAULT_INADDR	(1ULL << 20)
/*
 * catu_get_table : Retrieve the table pointers for the given @offset
 * within the buffer. The buffer is wrapped around to a valid offset.
 *
 * Returns : The CPU virtual address for the beginning of the table
 * containing the data page pointer for @offset. If @daddrp is not NULL,
 * @daddrp points the DMA address of the beginning of the table.
 */
static cate_t *catu_get_table(struct tmc_sg_table *catu_table,
			      unsigned long offset,
			      dma_addr_t *daddrp)
{
	unsigned long buf_size = tmc_sg_table_buf_size(catu_table);
	unsigned int table_nr, pg_idx, pg_offset;
	struct tmc_pages *table_pages = &catu_table->table_pages;
	void *ptr;

	/* Make sure offset is within the range */
	offset %= buf_size;

	/*
	 * Each table can address 1MB and a single kernel page can
	 * contain "CATU_PAGES_PER_SYSPAGE" CATU tables.
	 */
	table_nr = offset >> 20;
	/* Find the table page where the table_nr lies in */
	pg_idx = table_nr / CATU_PAGES_PER_SYSPAGE;
	pg_offset = (table_nr % CATU_PAGES_PER_SYSPAGE) * CATU_PAGE_SIZE;
	if (daddrp)
		*daddrp = table_pages->daddrs[pg_idx] + pg_offset;
	ptr = page_address(table_pages->pages[pg_idx]);
	return (cate_t *)((unsigned long)ptr + pg_offset);
}
/*
 * catu_populate_table : Populate the given CATU table.
 * The table is always populated as a circular table.
 * i.e, the "prev" link of the "first" table points to the "last"
 * table and the "next" link of the "last" table points to the
 * "first" table. The buffer should be made linear by calling
 * catu_set_table().
 */
static void
catu_populate_table(struct tmc_sg_table *catu_table)
{
	int i;
	int sys_pidx;	/* Index to current system data page */
	int catu_pidx;	/* Index of CATU page within the system data page */
	unsigned long offset, buf_size, table_end;
	dma_addr_t data_daddr;
	dma_addr_t prev_taddr, next_taddr, cur_taddr;
	cate_t *table_ptr, *next_table;

	/* Start from the beginning of the buffer with both page indices at 0 */
	buf_size = tmc_sg_table_buf_size(catu_table);
	sys_pidx = catu_pidx = 0;
	offset = 0;

	table_ptr = catu_get_table(catu_table, 0, &cur_taddr);
	prev_taddr = 0;	/* Prev link for the first table */

	while (offset < buf_size) {
		/*
		 * The @offset is always 1M aligned here and we have an
		 * empty table @table_ptr to fill. Each table can address
		 * upto 1MB data buffer. The last table may have fewer
		 * entries if the buffer size is not aligned.
		 */
		table_end = (offset + SZ_1M) < buf_size ?
			    (offset + SZ_1M) : buf_size;

		/* Populate the data page pointers in the given table */
		for (i = 0; offset < table_end;
		     i++, offset += CATU_PAGE_SIZE) {
			data_daddr = catu_table->data_pages.daddrs[sys_pidx] +
				     catu_pidx * CATU_PAGE_SIZE;
			catu_dbg(catu_table->dev,
				 "[table %5ld:%03d] 0x%llx\n",
				 (offset >> 20), i, data_daddr);
			table_ptr[i] = catu_make_entry(data_daddr);

			/* Move the pointers for data pages */
			catu_pidx = (catu_pidx + 1) % CATU_PAGES_PER_SYSPAGE;
			if (catu_pidx == 0)
				sys_pidx++;
		}

		/*
		 * If we have finished all the valid entries, fill the rest of
		 * the table (i.e, last table page) with invalid entries,
		 * to fail the lookups.
		 */
		if (offset == buf_size) {
			memset(&table_ptr[i], 0,
			       sizeof(cate_t) * (CATU_PTRS_PER_PAGE - i));
			next_taddr = 0;
		} else {
			next_table = catu_get_table(catu_table,
						    offset, &next_taddr);
		}

		/*
		 * Write the link entries, which live in the last two slots
		 * of each table page (entries 510 = prev, 511 = next, per
		 * the layout comment at the top of this file). A zero
		 * next_taddr in the last table leaves its next link invalid
		 * until catu_set_table() closes the circle.
		 */
		table_ptr[CATU_PTRS_PER_PAGE - 2] = catu_make_entry(prev_taddr);
		table_ptr[CATU_PTRS_PER_PAGE - 1] = catu_make_entry(next_taddr);

		/* Update the prev/next addresses */
		if (next_taddr) {
			prev_taddr = cur_taddr;
			cur_taddr = next_taddr;
			table_ptr = next_table;
		}
	}

	/* Sync the table for device */
	tmc_sg_table_sync_table(catu_table);
}
/*
 * catu_init_sg_table : Allocate a TMC scatter-gather table sized to hold
 * the CATU translation tables for @size bytes of trace data on @node.
 *
 * NOTE(review): this region of the file appears corrupted by extraction.
 * The body of catu_init_sg_table is cut short after the allocation and
 * is followed by fragments that reference identifiers declared in other
 * functions (rrp/rwp/etr_buf from read/write-pointer handling;
 * drvdata/catu_desc/base/res/pdata from the probe routine). Code is left
 * byte-identical below, pending reconstruction against the upstream
 * driver. Also note the fused token "staticstruct" — a pre-existing
 * defect preserved here.
 */
staticstruct tmc_sg_table *
catu_init_sg_table(struct device *catu_dev, int node,
		   ssize_t size, void **pages)
{
	int nr_tpages;
	struct tmc_sg_table *catu_table;

	/*
	 * Each table can address upto 1MB and we can have
	 * CATU_PAGES_PER_SYSPAGE tables in a system page.
	 */
	nr_tpages = DIV_ROUND_UP(size, CATU_PAGES_PER_SYSPAGE * SZ_1M);
	catu_table = tmc_alloc_sg_table(catu_dev, node, nr_tpages,
					size >> PAGE_SHIFT, pages);
	if (IS_ERR(catu_table))
		return catu_table;

	/*
	 * NOTE(review): fragment from a different function — rrp, rwp,
	 * etr_buf, r_offset and w_offset are not declared in this scope.
	 */
	/*
	 * ETR started off at etr_buf->hwaddr. Convert the RRP/RWP to
	 * offsets within the trace buffer.
	 */
	r_offset = rrp - etr_buf->hwaddr;
	w_offset = rwp - etr_buf->hwaddr;

	/* NOTE(review): fragments from the probe routine follow. */
	drvdata->atclk = devm_clk_get_optional_enabled(dev, "atclk");
	if (IS_ERR(drvdata->atclk))
		return PTR_ERR(drvdata->atclk);

	catu_desc.name = coresight_alloc_device_name(&catu_devs, dev);
	if (!catu_desc.name)
		return -ENOMEM;

	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base)) {
		ret = PTR_ERR(base);
		goto out;
	}

	/* Setup dma mask for the device */
	dma_mask = readl_relaxed(base + CORESIGHT_DEVID) & 0x3f;
	switch (dma_mask) {
	case 32:
	case 40:
	case 44:
	case 48:
	case 52:
	case 56:
	case 64:
		break;
	default:
		/* Default to the 40bits as supported by TMC-ETR */
		dma_mask = 40;
	}

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(dma_mask));
	if (ret)
		goto out;

	pdata = coresight_get_platform_data(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto out;
	}
	dev->platform_data = pdata;
/*
 * NOTE(review): extraction residue — website disclaimer text (German),
 * not part of the driver; should be removed. Translation: "The
 * information on this website has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness,
 * nor quality of the information provided is guaranteed. Note: the
 * colored syntax display and the measurement are still experimental."
 */