gicv5_its_dcache_clean(its, itt, num_ents * sizeof(*itt));
return 0;
}
/* * Allocate a two-level ITT. All ITT entries are allocated in one go, unlike * with the device table. Span may be used to limit the second level table * size, where possible.
*/ staticint gicv5_its_create_itt_two_level(struct gicv5_its_chip_data *its, struct gicv5_its_dev *its_dev, unsignedint event_id_bits, unsignedint itt_l2sz, unsignedint num_events)
{ unsignedint l1_bits, l2_bits, span, events_per_l2_table; unsignedint complete_tables, final_span, num_ents;
__le64 *itt_l1, *itt_l2, **l2ptrs; int i, ret;
u64 val;
ret = gicv5_its_l2sz_to_l2_bits(itt_l2sz); if (ret >= event_id_bits) {
pr_debug("Incorrect l2sz (0x%x) for %u EventID bits. Cannot allocate ITT\n",
itt_l2sz, event_id_bits); return -EINVAL;
}
l2_bits = ret;
l1_bits = event_id_bits - l2_bits;
num_ents = BIT(l1_bits);
itt_l1 = kcalloc(num_ents, sizeof(*itt_l1), GFP_KERNEL); if (!itt_l1) return -ENOMEM;
/* * Need to determine how many entries there are per L2 - this is based * on the number of bits in the table.
*/
events_per_l2_table = BIT(l2_bits);
complete_tables = num_events / events_per_l2_table;
final_span = order_base_2(num_events % events_per_l2_table);
for (i = 0; i < num_ents; i++) {
size_t l2sz;
span = i == complete_tables ? final_span : l2_bits;
itt_l2 = kcalloc(BIT(span), sizeof(*itt_l2), GFP_KERNEL); if (!itt_l2) {
ret = -ENOMEM; goto out_free;
}
/* * Function to check whether the device table or ITT table support * a two-level table and if so depending on the number of id_bits * requested, determine whether a two-level table is required. * * Return the 2-level size value if a two level table is deemed * necessary.
*/ staticbool gicv5_its_l2sz_two_level(bool devtab, u32 its_idr1, u8 id_bits, u8 *sz)
{ unsignedint l2_bits, l2_sz;
if (devtab && !FIELD_GET(GICV5_ITS_IDR1_DT_LEVELS, its_idr1)) returnfalse;
if (!devtab && !FIELD_GET(GICV5_ITS_IDR1_ITT_LEVELS, its_idr1)) returnfalse;
/* * Pick an L2 size that matches the pagesize; if a match * is not found, go for the smallest supported l2 size granule. * * This ensures that we will always be able to allocate * contiguous memory at L2.
*/ switch (PAGE_SIZE) { case SZ_64K: if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k; break;
}
fallthrough; case SZ_4K: if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k; break;
}
fallthrough; case SZ_16K: if (GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(its_idr1)) {
l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_16k; break;
} if (GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(its_idr1)) {
l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_4k; break;
} if (GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(its_idr1)) {
l2_sz = GICV5_ITS_DT_ITT_CFGR_L2SZ_64k; break;
}
/* * Allocate a level 2 device table entry, update L1 parent to reference it. * Only used for 2-level device tables, and it is called on demand.
*/ staticint gicv5_its_alloc_l2_devtab(struct gicv5_its_chip_data *its, unsignedint l1_index)
{
__le64 *l2devtab, *l1devtab = its->devtab_cfgr.l2.l1devtab;
u8 span, l2sz, l2_bits;
u64 l1dte;
if (FIELD_GET(GICV5_DTL1E_VALID, le64_to_cpu(l1devtab[l1_index]))) return 0;
/* * Span allows us to create a smaller L2 device table. * If it is too large, use the number of allowed L2 bits.
*/ if (span > l2_bits)
span = l2_bits;
l2devtab = kcalloc(BIT(span), sizeof(*l2devtab), GFP_KERNEL); if (!l2devtab) return -ENOMEM;
if (alloc) { /* * Allocate a new L2 device table here before * continuing. We make the assumption that the span in * the L1 table has been set correctly, and blindly use * that value.
*/
ret = gicv5_its_alloc_l2_devtab(its, l1_idx); if (ret) return NULL;
}
/*
 * Register a new device in the device table. Allocate an ITT and
 * program the L2DTE entry according to the ITT structure that
 * was chosen.
 *
 * NOTE(review): this block appears to be a whitespace-mangled extraction of
 * the original function ("staticint" is a fused "static int"); several
 * statements from the middle of the function look like they were lost -
 * see the notes below. Confirm against the original source before relying
 * on this text.
 */ staticint gicv5_its_device_register(struct gicv5_its_chip_data *its, struct gicv5_its_dev *its_dev)
{
u8 event_id_bits, device_id_bits, itt_struct, itt_l2sz;
phys_addr_t itt_phys_base; bool two_level_itt;
u32 idr1, idr2;
__le64 *dte;
u64 val; int ret;
/*
 * NOTE(review): device_id_bits is read here but never assigned anywhere in
 * the visible code - the statement initialising it (presumably derived from
 * the device table configuration) seems to have been dropped by the
 * extraction. As written, this is a read of an uninitialized variable.
 * Similarly, itt_struct, itt_l2sz, itt_phys_base, two_level_itt, idr1,
 * idr2 and val are declared but never used below, which suggests the code
 * that consumed them (ITT allocation and DTE programming) is missing.
 */
if (its_dev->device_id >= BIT(device_id_bits)) {
/* Reject DeviceIDs that do not fit in the device table index space. */
pr_err("Supplied DeviceID (%u) outside of Device Table range (%u)!",
its_dev->device_id, (u32)GENMASK(device_id_bits - 1, 0)); return -EINVAL;
}
/* Look up (and, per the 'true' flag, allocate on demand) the L2 DTE slot. */
dte = gicv5_its_devtab_get_dte_ref(its, its_dev->device_id, true); if (!dte) return -ENOMEM;
/* A valid DTE means this DeviceID is already registered. */
if (FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte))) return -EBUSY;
/*
 * Determine how many bits we need, validate those against the max.
 * Based on these, determine if we should go for a 1- or 2-level ITT.
 */
event_id_bits = order_base_2(its_dev->num_events);
/*
 * Invalidate any cached copy of the (presumably just-programmed) DTE.
 * NOTE(review): the error path below tears down an ITT and a DTE value that
 * are not written in the visible code - further evidence that the ITT
 * allocation and its_write_table_entry(its, dte, val) call went missing.
 */
ret = gicv5_its_device_cache_inv(its, its_dev); if (ret) {
/* Roll back: invalidate the DTE and free the ITT on cache-inv failure. */
its_write_table_entry(its, dte, 0);
gicv5_its_free_itt(its_dev); return ret;
}
return 0;
}
/* * Unregister a device in the device table. Lookup the device by ID, free the * corresponding ITT, mark the device as invalid in the device table.
*/ staticint gicv5_its_device_unregister(struct gicv5_its_chip_data *its, struct gicv5_its_dev *its_dev)
{
__le64 *dte;
if (!FIELD_GET(GICV5_DTL2E_VALID, le64_to_cpu(*dte))) {
pr_debug("Device table entry for DeviceID 0x%x is not valid. Nothing to clean up!",
its_dev->device_id); return -EINVAL;
}
/* Zero everything - make it clear that this is an invalid entry */
its_write_table_entry(its, dte, 0);
/* * Allocate a 1-level device table. All entries are allocated, but marked * invalid.
*/ staticint gicv5_its_alloc_devtab_linear(struct gicv5_its_chip_data *its,
u8 device_id_bits)
{
__le64 *devtab;
size_t sz;
u64 baser;
u32 cfgr;
/* * We expect a GICv5 implementation requiring a large number of * deviceID bits to support a 2-level device table. If that's not * the case, cap the number of deviceIDs supported according to the * kmalloc limits so that the system can chug along with a linear * device table.
*/
sz = BIT_ULL(device_id_bits) * sizeof(*devtab); if (sz > KMALLOC_MAX_SIZE) {
u8 device_id_cap = ilog2(KMALLOC_MAX_SIZE/sizeof(*devtab));
pr_warn("Limiting device ID bits from %u to %u\n",
device_id_bits, device_id_cap);
device_id_bits = device_id_cap;
}
devtab = kcalloc(BIT(device_id_bits), sizeof(*devtab), GFP_KERNEL); if (!devtab) return -ENOMEM;
/* * Allocate a 2-level device table. L2 entries are not allocated, * they are allocated on-demand.
*/ staticint gicv5_its_alloc_devtab_two_level(struct gicv5_its_chip_data *its,
u8 device_id_bits,
u8 devtab_l2sz)
{ unsignedint l1_bits, l2_bits, i;
__le64 *l1devtab, **l2ptrs;
size_t l1_sz;
u64 baser;
u32 cfgr;
l2_bits = gicv5_its_l2sz_to_l2_bits(devtab_l2sz);
l1_bits = device_id_bits - l2_bits;
l1_sz = BIT(l1_bits) * sizeof(*l1devtab); /* * With 2-level device table support it is highly unlikely * that we are not able to allocate the required amount of * device table memory to cover deviceID space; cap the * deviceID space if we encounter such set-up. * If this ever becomes a problem we could revisit the policy * behind level 2 size selection to reduce level-1 deviceID bits.
*/ if (l1_sz > KMALLOC_MAX_SIZE) {
l1_bits = ilog2(KMALLOC_MAX_SIZE/sizeof(*l1devtab));
pr_warn("Limiting device ID bits from %u to %u\n",
device_id_bits, l1_bits + l2_bits);
device_id_bits = l1_bits + l2_bits;
l1_sz = KMALLOC_MAX_SIZE;
}
l1devtab = kcalloc(BIT(l1_bits), sizeof(*l1devtab), GFP_KERNEL); if (!l1devtab) return -ENOMEM;
/* * Initialise the device table as either 1- or 2-level depending on what is * supported by the hardware.
*/ staticint gicv5_its_init_devtab(struct gicv5_its_chip_data *its)
{
u8 device_id_bits, devtab_l2sz; bool two_level_devtab;
u32 idr1;
staticstruct gicv5_its_dev *gicv5_its_alloc_device(struct gicv5_its_chip_data *its, int nvec,
u32 dev_id)
{ struct gicv5_its_dev *its_dev; void *entry; int ret;
its_dev = gicv5_its_find_device(its, dev_id); if (!IS_ERR(its_dev)) {
pr_err("A device with this DeviceID (0x%x) has already been registered.\n",
dev_id);
return ERR_PTR(-EBUSY);
}
its_dev = kzalloc(sizeof(*its_dev), GFP_KERNEL); if (!its_dev) return ERR_PTR(-ENOMEM);
if (!(info->flags & MSI_ALLOC_FLAGS_FIXED_MSG_DATA)) {
event_id_base = bitmap_find_free_region(its_dev->event_map,
its_dev->num_events,
get_count_order(nr_irqs)); if (event_id_base < 0) return event_id_base;
} else { /* * We want to have a fixed EventID mapped for hardcoded * message data allocations.
*/ if (WARN_ON_ONCE(nr_irqs != 1)) return -EINVAL;
event_id_base = info->hwirq;
if (event_id_base >= its_dev->num_events) {
pr_err("EventID ouside of ITT range; cannot allocate an ITT entry!\n");
return -EINVAL;
}
if (test_and_set_bit(event_id_base, its_dev->event_map)) {
pr_warn("Can't reserve event_id bitmap\n"); return -EINVAL;
cr0 = its_readl_relaxed(its_node, GICV5_ITS_CR0);
enabled = FIELD_GET(GICV5_ITS_CR0_ITSEN, cr0); if (WARN(enabled, "ITS %s enabled, disabling it before proceeding\n", np->full_name)) {
ret = gicv5_its_disable(its_node); if (ret) goto out_free_node;
}
if (of_property_read_bool(np, "dma-noncoherent")) { /* * A non-coherent ITS implies that some cache levels cannot be * used coherently by the cores and GIC. Our only option is to mark * memory attributes for the GIC as non-cacheable; by default, * non-cacheable memory attributes imply outer-shareable * shareability, the value written into ITS_CR1_SH is ignored.
*/
cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_NO_READ_ALLOC) |
FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_NO_READ_ALLOC) |
FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_NON_CACHE) |
FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_NON_CACHE);
its_node->flags |= ITS_FLAGS_NON_COHERENT;
} else {
cr1 = FIELD_PREP(GICV5_ITS_CR1_ITT_RA, GICV5_READ_ALLOC) |
FIELD_PREP(GICV5_ITS_CR1_DT_RA, GICV5_READ_ALLOC) |
FIELD_PREP(GICV5_ITS_CR1_IC, GICV5_WB_CACHE) |
FIELD_PREP(GICV5_ITS_CR1_OC, GICV5_WB_CACHE) |
FIELD_PREP(GICV5_ITS_CR1_SH, GICV5_INNER_SHARE);
}
its_writel_relaxed(its_node, cr1, GICV5_ITS_CR1);
ret = gicv5_its_init_devtab(its_node); if (ret) goto out_free_node;
ret = gicv5_its_enable(its_node); if (ret) goto out_free_devtab;
ret = gicv5_its_init_domain(its_node, parent_domain); if (ret) goto out_disable_its;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.