/* * sysfs cpumask attributes. For uncore PMU, we only have a single CPU to show
*/
ssize_t hisi_cpumask_sysfs_show(struct device *dev, struct device_attribute *attr, char *buf)
{ struct hisi_pmu *hisi_pmu = to_hisi_pmu(dev_get_drvdata(dev));
staticbool hisi_validate_event_group(struct perf_event *event)
{ struct perf_event *sibling, *leader = event->group_leader; struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); /* Include count for the event */ int counters = 1;
if (!is_software_event(leader)) { /* * We must NOT create groups containing mixed PMUs, although * software events are acceptable
*/ if (leader->pmu != event->pmu) returnfalse;
/* Increment counter for the leader */ if (leader != event)
counters++;
}
for_each_sibling_event(sibling, event->group_leader) { if (is_software_event(sibling)) continue; if (sibling->pmu != event->pmu) returnfalse; /* Increment counter for each sibling */
counters++;
}
/* The group can not count events more than the counters in the HW */ return counters <= hisi_pmu->num_counters;
}
int hisi_uncore_pmu_get_event_idx(struct perf_event *event)
{ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); unsignedlong *used_mask = hisi_pmu->pmu_events.used_mask;
u32 num_counters = hisi_pmu->num_counters; int idx;
idx = find_first_zero_bit(used_mask, num_counters); if (idx == num_counters) return -EAGAIN;
overflown = hisi_pmu->ops->get_int_status(hisi_pmu); if (!overflown) return IRQ_NONE;
/* * Find the counter index which overflowed if the bit was set * and handle it.
*/
for_each_set_bit(idx, &overflown, hisi_pmu->num_counters) { /* Write 1 to clear the IRQ status flag */
hisi_pmu->ops->clear_int_status(hisi_pmu, idx); /* Get the corresponding event struct */
event = hisi_pmu->pmu_events.hw_events[idx]; if (!event) continue;
if (event->attr.type != event->pmu->type) return -ENOENT;
/* * We do not support sampling as the counters are all * shared by all CPU cores in a CPU die(SCCL). Also we * do not support attach to a task(per-process mode)
*/ if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) return -EOPNOTSUPP;
/* * The uncore counters not specific to any CPU, so cannot * support per-task
*/ if (event->cpu < 0) return -EINVAL;
/* * Validate if the events in group does not exceed the * available counters in hardware.
*/ if (!hisi_validate_event_group(event)) return -EINVAL;
if (hisi_pmu->on_cpu == -1) return -EINVAL; /* * We don't assign an index until we actually place the event onto * hardware. Use -1 to signify that we haven't decided where to put it * yet.
*/
hwc->idx = -1;
hwc->config_base = event->attr.config;
if (hisi_pmu->ops->check_filter && hisi_pmu->ops->check_filter(event)) return -EINVAL;
/* Enforce to use the same CPU for all events in this PMU */
event->cpu = hisi_pmu->on_cpu;
/* * Set the counter to count the event that we're interested in, * and enable interrupt and counter.
*/ staticvoid hisi_uncore_pmu_enable_event(struct perf_event *event)
{ struct hisi_pmu *hisi_pmu = to_hisi_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw;
/* * The HiSilicon PMU counters support 32 bits or 48 bits, depending on * the PMU. We reduce it to 2^(counter_bits - 1) to account for the * extreme interrupt latency. So we could hopefully handle the overflow * interrupt before another 2^(counter_bits - 1) events occur and the * counter overtakes its previous value.
*/
u64 val = BIT_ULL(hisi_pmu->counter_bits - 1);
local64_set(&hwc->prev_count, val); /* Write start value to the hardware event counter */
hisi_pmu->ops->write_counter(hisi_pmu, hwc, val);
}
EXPORT_SYMBOL_NS_GPL(hisi_uncore_pmu_set_event_period, "HISI_PMU");
/* * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be * determined from the MPIDR_EL1, but the encoding varies by CPU: * * - For MT variants of TSV110: * SCCL is Aff2[7:3], CCL is Aff2[2:0] * * - For other MT parts: * SCCL is Aff3[7:0], CCL is Aff2[7:0] * * - For non-MT parts: * SCCL is Aff2[7:0], CCL is Aff1[7:0]
*/ staticvoid hisi_read_sccl_and_ccl_id(int *scclp, int *cclp)
{
u64 mpidr = read_cpuid_mpidr(); int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); bool mt = mpidr & MPIDR_MT_BITMASK; int sccl, ccl;
if (scclp)
*scclp = sccl; if (cclp)
*cclp = ccl;
}
/* * Check whether the CPU is associated with this uncore PMU
*/ staticbool hisi_pmu_cpu_is_associated_pmu(struct hisi_pmu *hisi_pmu)
{ struct hisi_pmu_topology *topo = &hisi_pmu->topo; int sccl_id, ccl_id;
if (topo->ccl_id == -1) { /* If CCL_ID is -1, the PMU only shares the same SCCL */
hisi_read_sccl_and_ccl_id(&sccl_id, NULL);
/* * If the CPU is not associated to PMU, initialize the hisi_pmu->on_cpu * based on the locality if it hasn't been initialized yet. For PMUs * do have associated CPUs, it'll be updated later.
*/ if (!hisi_pmu_cpu_is_associated_pmu(hisi_pmu)) { if (hisi_pmu->on_cpu != -1) return 0;
/* If another associated CPU is already managing this PMU, simply return. */ if (hisi_pmu->on_cpu != -1 &&
cpumask_test_cpu(hisi_pmu->on_cpu, &hisi_pmu->associated_cpus)) return 0;
/* Use this CPU in cpumask for event counting */
hisi_pmu->on_cpu = cpu;
/* Overflow interrupt also should use the same CPU */ if (hisi_pmu->irq > 0)
WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(cpu)));
/* Nothing to do if this CPU doesn't own the PMU */ if (hisi_pmu->on_cpu != cpu) return 0;
/* Give up ownership of the PMU */
hisi_pmu->on_cpu = -1;
/* * Migrate ownership of the PMU to a new CPU chosen from PMU's online * associated CPUs if possible, if no associated CPU online then * migrate to one online CPU.
*/
target = cpumask_any_and_but(&hisi_pmu->associated_cpus,
cpu_online_mask, cpu); if (target >= nr_cpu_ids)
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids) return 0;
perf_pmu_migrate_context(&hisi_pmu->pmu, cpu, target); /* Use this CPU for event counting */
hisi_pmu->on_cpu = target;
if (hisi_pmu->irq > 0)
WARN_ON(irq_set_affinity(hisi_pmu->irq, cpumask_of(target)));
/* * Retrieve the topology information from the firmware for the hisi_pmu device. * The topology ID will be -1 if we cannot initialize it, it may either due to * the PMU doesn't locate on this certain topology or the firmware needs to be * fixed.
*/ void hisi_uncore_pmu_init_topology(struct hisi_pmu *hisi_pmu, struct device *dev)
{ struct hisi_pmu_topology *topo = &hisi_pmu->topo;
/*
 * NOTE(review): the following text is extraction residue (a German website
 * disclaimer), not part of the driver. Preserved here, translated, and
 * wrapped in a comment so it no longer breaks compilation:
 *
 * "The information on this website has been carefully compiled to the best
 *  of our knowledge. However, neither completeness, nor correctness, nor
 *  quality of the provided information is guaranteed.
 *  Note: the colored syntax highlighting and the measurement are still
 *  experimental."
 */