/* unlocked read of current trace ID value for given CPU */ staticint _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{ return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
}
/*
 * Search the used_ids bitmap for a free ID at an odd bit position.
 * Returns the odd ID on success, 0 if no odd ID is available.
 */
static int coresight_trace_id_find_odd_id(struct coresight_trace_id_map *id_map)
{
	int candidate = 1;

	while (candidate < CORESIGHT_TRACE_ID_RES_TOP) {
		/*
		 * Bitmap length is CORESIGHT_TRACE_ID_RES_TOP; resume the
		 * search from the current candidate offset.
		 */
		candidate = find_next_zero_bit(id_map->used_ids,
					       CORESIGHT_TRACE_ID_RES_TOP,
					       candidate);
		if (candidate >= CORESIGHT_TRACE_ID_RES_TOP)
			break;
		if (candidate & 0x1)
			return candidate;
		/* free bit was even - keep scanning past it */
		candidate++;
	}
	return 0;
}
/* * Allocate new ID and set in use * * if @preferred_id is a valid id then try to use that value if available. * if @preferred_id is not valid and @prefer_odd_id is true, try for odd id. * * Otherwise allocate next available ID.
*/ staticint coresight_trace_id_alloc_new_id(struct coresight_trace_id_map *id_map, int preferred_id, unsignedint flags)
{ int id = 0;
/* for backwards compatibility, cpu IDs may use preferred value */ if (IS_VALID_CS_TRACE_ID(preferred_id)) { if (!test_bit(preferred_id, id_map->used_ids)) {
id = preferred_id; goto trace_id_allocated;
} elseif (flags & TRACE_ID_REQ_STATIC) return -EBUSY;
} elseif (flags & TRACE_ID_PREFER_ODD) { /* may use odd ids to avoid preferred legacy cpu IDs */
id = coresight_trace_id_find_odd_id(id_map); if (id) goto trace_id_allocated;
} elseif (!IS_VALID_CS_TRACE_ID(preferred_id) &&
(flags & TRACE_ID_REQ_STATIC)) return -EINVAL;
/* * skip reserved bit 0, look at bitmap length of * CORESIGHT_TRACE_ID_RES_TOP from offset of bit 1.
*/
id = find_next_zero_bit(id_map->used_ids, CORESIGHT_TRACE_ID_RES_TOP, 1); if (id >= CORESIGHT_TRACE_ID_RES_TOP) return -EINVAL;
/* mark as used */
trace_id_allocated:
set_bit(id, id_map->used_ids); return id;
}
/*
 * Release @id back to @id_map.  Warns (and does nothing) if the ID is
 * out of range or was not marked as in use.
 */
static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_map)
{
	if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
		return;
	if (WARN(!test_bit(id, id_map->used_ids), "Freeing unused ID %d\n", id))
		return;

	clear_bit(id, id_map->used_ids);
}
/* * Release all IDs and clear CPU associations.
 */ staticvoid coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
{ unsignedlong flags; int cpu;
/*
 * NOTE(review): this span appears corrupted.  The header above declares
 * coresight_trace_id_release_all(), but the statements below use an 'id'
 * variable that is never declared here, jump to a label
 * 'get_cpu_id_out_unlock' that is not visible, and never unlock or
 * return.  They look like a fragment of a per-CPU trace-ID *allocation*
 * routine (presumably _coresight_trace_id_get_cpu_id) spliced into this
 * function, with the locking and the real release_all body lost.
 * Restore both functions from the upstream source rather than patching
 * this text -- TODO confirm against the original file.
 */
/* check for existing allocation for this CPU */
id = _coresight_trace_id_read_cpu_id(cpu, id_map); if (id) goto get_cpu_id_out_unlock;
/* * Find a new ID. * * Use legacy values where possible in the dynamic trace ID allocator to * allow older tools to continue working if they are not upgraded at the * same time as the kernel drivers. * * If the generated legacy ID is invalid, or not available then the next * available dynamic ID will be used.
 */
id = coresight_trace_id_alloc_new_id(id_map,
CORESIGHT_LEGACY_CPU_TRACE_ID(cpu),
TRACE_ID_ANY); if (!IS_VALID_CS_TRACE_ID(id)) goto get_cpu_id_out_unlock;
/* allocate the new id to the cpu */
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
/*
 * Allocate a trace ID for a system component from the default map.
 * Odd IDs are preferred so that legacy per-CPU ID values remain
 * available for CPU sources.
 */
int coresight_trace_id_get_system_id(void)
{
	return coresight_trace_id_map_get_system_id(&id_map_default, 0,
						    TRACE_ID_PREFER_ODD);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_system_id);
/*
 * Request the exact, caller-chosen @trace_id for a system component
 * from the default map (TRACE_ID_REQ_STATIC - no fallback allocation).
 */
int coresight_trace_id_get_static_system_id(int trace_id)
{
	return coresight_trace_id_map_get_system_id(&id_map_default,
						    trace_id,
						    TRACE_ID_REQ_STATIC);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_static_system_id);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.