/*
 * The PMU registers start at 0xA00 in the DMC-620 memory map, and these
 * offsets are relative to that base.
 *
 * Each counter has a group of control/value registers, and the
 * DMC620_PMU_COUNTERn offsets are within a counter group.
 *
 * The counter registers groups start at 0xA10.
 */
/* Per-counter overflow status words, one bit per counter */
#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2	0x8
#define DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK	\
		(DMC620_PMU_CLKDIV2_MAX_COUNTERS - 1)
#define DMC620_PMU_OVERFLOW_STATUS_CLK		0xC
#define DMC620_PMU_OVERFLOW_STATUS_CLK_MASK	\
		(DMC620_PMU_CLK_MAX_COUNTERS - 1)
/* Start of the per-counter register groups (0xA10 absolute) */
#define DMC620_PMU_COUNTERS_BASE		0x10
/* Offsets within one counter's register group */
#define DMC620_PMU_COUNTERn_MASK_31_00		0x0
#define DMC620_PMU_COUNTERn_MASK_63_32		0x4
#define DMC620_PMU_COUNTERn_MATCH_31_00		0x8
#define DMC620_PMU_COUNTERn_MATCH_63_32		0xC
#define DMC620_PMU_COUNTERn_CONTROL		0x10
#define DMC620_PMU_COUNTERn_CONTROL_ENABLE	BIT(0)
#define DMC620_PMU_COUNTERn_CONTROL_INVERT	BIT(1)
#define DMC620_PMU_COUNTERn_CONTROL_EVENT_MUX	GENMASK(6, 2)
#define DMC620_PMU_COUNTERn_CONTROL_INCR_MUX	GENMASK(8, 7)
#define DMC620_PMU_COUNTERn_VALUE		0x20
/* Offset of the registers for a given counter, relative to 0xA00 */
#define DMC620_PMU_COUNTERn_OFFSET(n) \
	(DMC620_PMU_COUNTERS_BASE + 0x28 * (n))
/*
 * dmc620_pmu_irqs_lock: protects dmc620_pmu_irqs list
 * dmc620_pmu_node_lock: protects pmus_node lists in all dmc620_pmu instances
 */
static DEFINE_MUTEX(dmc620_pmu_irqs_lock);
static DEFINE_MUTEX(dmc620_pmu_node_lock);
/* Global list of shared PMU interrupt descriptors; see dmc620_pmu_irqs_lock */
static LIST_HEAD(dmc620_pmu_irqs);
	/*
	 * We put all clkdiv2 and clk counters to a same array.
	 * The first DMC620_PMU_CLKDIV2_MAX_COUNTERS bits belong to
	 * clkdiv2 counters, the last DMC620_PMU_CLK_MAX_COUNTERS
	 * belong to clk counters.
	 */
	/* Bitmap of counter slots currently allocated to events */
	DECLARE_BITMAP(used_mask, DMC620_PMU_MAX_COUNTERS);
	/* perf_event bound to each counter slot; NULL when the slot is free */
	struct perf_event *events[DMC620_PMU_MAX_COUNTERS];
};
do {
	/* We may also be called from the irq handler */
	prev_count = local64_read(&hwc->prev_count);
	new_count = dmc620_pmu_read_counter(event);
	/*
	 * Retry until no concurrent updater (e.g. the IRQ handler) has
	 * changed prev_count between our read and the cmpxchg.
	 */
} while (local64_cmpxchg(&hwc->prev_count,
		prev_count, new_count) != prev_count);
/* Counter wraps at DMC620_CNT_MAX_PERIOD; mask keeps the delta correct */
delta = (new_count - prev_count) & DMC620_CNT_MAX_PERIOD;
local64_add(delta, &event->count);
}
/*
 * HW doesn't provide a control to atomically disable all counters.
 * To prevent race condition (overflow happens while clearing status register),
 * disable all events before continuing
 */
for (idx = 0; idx < DMC620_PMU_MAX_COUNTERS; idx++) {
	event = dmc620_pmu->events[idx];
	if (!event)
		continue;
	dmc620_pmu_disable_counter(event);
}

/*
 * Build one combined overflow word: clkdiv2 status in the low bits,
 * clk status shifted above it — matching the layout of the events[]
 * array (clkdiv2 counters first, then clk counters).
 */
status = readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
status |= (readl(dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK) <<
		DMC620_PMU_CLKDIV2_MAX_COUNTERS);
if (status) {
	for_each_set_bit(idx, &status,
			DMC620_PMU_MAX_COUNTERS) {
		event = dmc620_pmu->events[idx];
		/* Overflow on a slot with no bound event should not happen */
		if (WARN_ON_ONCE(!event))
			continue;
		/* Fold the overflowed value into event->count, then re-arm */
		dmc620_pmu_event_update(event);
		dmc620_pmu_event_set_period(event);
	}

	if (status & DMC620_PMU_OVERFLOW_STATUS_CLKDIV2_MASK)
		writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
	/*
	 * NOTE(review): this fragment appears truncated — the matching
	 * clear of DMC620_PMU_OVERFLOW_STATUS_CLK for the clk counters
	 * is not visible here; verify it follows in the full source.
	 */
/* Reject events that target a different PMU */
if (event->attr.type != event->pmu->type)
	return -ENOENT;

/*
 * DMC 620 PMUs are shared across all cpus and cannot
 * support task bound and sampling events.
 */
if (is_sampling_event(event) ||
    event->attach_state & PERF_ATTACH_TASK) {
	dev_dbg(dmc620_pmu->pmu.dev, "Can't support per-task counters\n");
	return -EOPNOTSUPP;
}

/*
 * Many perf core operations (eg. events rotation) operate on a
 * single CPU context. This is obvious for CPU PMUs, where one
 * expects the same sets of events being observed on all CPUs,
 * but can lead to issues for off-core PMUs, where each
 * event could be theoretically assigned to a different CPU. To
 * mitigate this, we enforce CPU assignment to one, selected
 * processor.
 */
event->cpu = dmc620_pmu->irq->cpu;
if (event->cpu < 0)
	return -EINVAL;

/* No hardware counter assigned yet; chosen at event add time */
hwc->idx = -1;

/* A lone event (its own group leader) needs no further group checks */
if (event->group_leader == event)
	return 0;

/*
 * We can't atomically disable all HW counters so only one event allowed,
 * although software events are acceptable.
 */
if (!is_software_event(event->group_leader))
	return -EINVAL;
/* Pick any other online CPU; if none remain there is nothing to migrate */
target = cpumask_any_but(cpu_online_mask, cpu);
if (target >= nr_cpu_ids)
	return 0;

/* We're only reading, but this isn't the place to be involving RCU */
mutex_lock(&dmc620_pmu_node_lock);
/* Move every PMU sharing this irq onto the surviving CPU */
list_for_each_entry(dmc620_pmu, &irq->pmus_node, pmus_node)
	perf_pmu_migrate_context(&dmc620_pmu->pmu, irq->cpu, target);
mutex_unlock(&dmc620_pmu_node_lock);
/* Map the PMU register window; devm handles unmap on driver detach */
dmc620_pmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(dmc620_pmu->base))
	return PTR_ERR(dmc620_pmu->base);

/* Make sure device is reset before enabling interrupt */
for (i = 0; i < DMC620_PMU_MAX_COUNTERS; i++)
	dmc620_pmu_creg_write(dmc620_pmu, i, DMC620_PMU_COUNTERn_CONTROL, 0);
/* Clear any stale overflow status from before the reset */
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLKDIV2);
writel(0, dmc620_pmu->base + DMC620_PMU_OVERFLOW_STATUS_CLK);

irq_num = platform_get_irq(pdev, 0);
if (irq_num < 0)
	return irq_num;

/* Attach to the (possibly shared) overflow interrupt */
ret = dmc620_pmu_get_irq(dmc620_pmu, irq_num);
if (ret)
	return ret;

/* Derive a unique PMU name from the register block's physical address */
name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%llx", DMC620_PMUNAME,
		(u64)(res->start >> DMC620_PA_SHIFT));
if (!name) {
	dev_err(&pdev->dev, "Create name failed, PMU @%pa\n", &res->start);
	ret = -ENOMEM;
	goto out_teardown_dev;
}

ret = perf_pmu_register(&dmc620_pmu->pmu, name, -1);
if (ret)
	goto out_teardown_dev;
/*
 * NOTE(review): the following text is website boilerplate (originally in
 * German) that was accidentally pasted into this file and should be removed;
 * it is preserved here, translated, only so nothing is silently lost:
 * "The information on this web page was compiled carefully to the best of
 *  our knowledge. However, neither completeness, nor correctness, nor the
 *  quality of the information provided is guaranteed.
 *  Note: the syntax colouring and the measurement are still experimental."
 */