/*
 * Size of per-cpu log buffers. Firmware requires that the buffer does
 * not cross a 4k boundary.
 */
static int dtl_buf_entries = N_DISPATCH_LOG;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* * When CONFIG_VIRT_CPU_ACCOUNTING_NATIVE = y, the cpu accounting code controls * reading from the dispatch trace log. If other code wants to consume * DTL entries, it can set this pointer to a function that will get * called once for each DTL entry that gets processed.
*/ staticvoid (*dtl_consumer)(struct dtl_entry *entry, u64 index);
/*
 * The cpu accounting code controls the DTL ring buffer, and we get
 * given entries as they are processed.
 *
 * Copies one hypervisor DTL entry into the per-cpu reader ring buffer
 * and publishes it by bumping write_index. A NULL write_ptr means no
 * reader is attached, in which case the entry is dropped.
 *
 * NOTE(review): the original extraction was truncated after the overflow
 * check; the write-pointer advance and publish tail below is restored from
 * the upstream implementation — verify against the project tree.
 */
static void consume_dtle(struct dtl_entry *dtle, u64 index)
{
	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
	struct dtl_entry *wp = dtlr->write_ptr;
	struct lppaca *vpa = local_paca->lppaca_ptr;

	/* no reader attached: nothing to record */
	if (!wp)
		return;

	*wp = *dtle;
	barrier();

	/* check for hypervisor ring buffer overflow, ignore this entry if so */
	if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
		return;

	++wp;
	if (wp == dtlr->buf_end)
		wp = dtlr->buf;
	dtlr->write_index++;

	/* incrementing write_index makes the new entry visible */
	smp_wmb();

	dtlr->write_ptr = wp;
}
staticint dtl_start(struct dtl *dtl)
{ unsignedlong addr; int ret, hwcpu;
/* Register our dtl buffer with the hypervisor. The HV expects the
* buffer size to be passed in the second word of the buffer */
((u32 *)dtl->buf)[1] = cpu_to_be32(DISPATCH_LOG_BYTES);
hwcpu = get_hard_smp_processor_id(dtl->cpu);
addr = __pa(dtl->buf);
ret = register_dtl(hwcpu, addr); if (ret) {
printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) " "failed with %d\n", __func__, dtl->cpu, hwcpu, ret); return -EIO;
}
/* set our initial buffer indices */
lppaca_of(dtl->cpu).dtl_idx = 0;
/* ensure that our updates to the lppaca fields have occurred before
* we actually enable the logging */
smp_wmb();
/* only allow one reader */ if (dtl->buf) return -EBUSY;
/* ensure there are no other conflicting dtl users */ if (!down_read_trylock(&dtl_access_lock)) return -EBUSY;
n_entries = dtl_buf_entries;
buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu)); if (!buf) {
printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n",
__func__, dtl->cpu);
up_read(&dtl_access_lock); return -ENOMEM;
}
spin_lock(&dtl->lock);
rc = -EBUSY; if (!dtl->buf) { /* store the original allocation size for use during read */
dtl->buf_entries = n_entries;
dtl->buf = buf;
dtl->last_idx = 0;
rc = dtl_start(dtl); if (rc)
dtl->buf = NULL;
}
spin_unlock(&dtl->lock);
if (rc) {
up_read(&dtl_access_lock);
kmem_cache_free(dtl_cache, buf);
}
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Scan the dispatch trace log and count up the stolen time.
 * Should be called with interrupts disabled.
 *
 * Walks the per-cpu dispatch log from dtl_ridx up to the hypervisor's
 * current index (vpa->dtl_idx), summing enqueue_to_dispatch_time +
 * ready_to_enqueue_time for every entry whose timebase is <= @stop_tb.
 * If the hypervisor has lapped us (more than N_DISPATCH_LOG entries
 * behind), the scan resynchronizes to the oldest still-valid entry.
 * Each consumed entry is also handed to dtl_consumer when CONFIG_DTL
 * is enabled. Updates dtl_ridx/dtl_curr and returns the stolen time
 * accumulated, in timebase ticks.
 */
static notrace u64 scan_dispatch_log(u64 stop_tb)
{
	u64 i = local_paca->dtl_ridx;
	struct dtl_entry *dtl = local_paca->dtl_curr;
	struct dtl_entry *dtl_end = local_paca->dispatch_log_end;
	struct lppaca *vpa = local_paca->lppaca_ptr;
	u64 tb_delta;
	u64 stolen = 0;
	u64 dtb;

	if (!dtl)
		return 0;

	if (i == be64_to_cpu(vpa->dtl_idx))
		return 0;
	while (i < be64_to_cpu(vpa->dtl_idx)) {
		dtb = be64_to_cpu(dtl->timebase);
		tb_delta = be32_to_cpu(dtl->enqueue_to_dispatch_time) +
			be32_to_cpu(dtl->ready_to_enqueue_time);
		/* re-read dtl_idx after reading the entry, so the overflow
		 * check below sees an index at least as new as the data */
		barrier();
		if (i + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx)) {
			/* buffer has overflowed */
			i = be64_to_cpu(vpa->dtl_idx) - N_DISPATCH_LOG;
			dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG);
			continue;
		}
		if (dtb > stop_tb)
			break;
#ifdef CONFIG_DTL
		if (dtl_consumer)
			dtl_consumer(dtl, i);
#endif
		stolen += tb_delta;
		++i;
		++dtl;
		/* ring buffer: wrap back to the start of the log */
		if (dtl == dtl_end)
			dtl = local_paca->dispatch_log;
	}
	local_paca->dtl_ridx = i;
	local_paca->dtl_curr = dtl;
	return stolen;
}
/* * Accumulate stolen time by scanning the dispatch trace log. * Called on entry from user mode.
*/ void notrace pseries_accumulate_stolen_time(void)
{
u64 sst, ust; struct cpu_accounting_data *acct = &local_paca->accounting;
/*
 * NOTE(review): the remainder of this chunk was unrelated German-language
 * website-disclaimer boilerplate ("the information on this website was
 * compiled to the best of our knowledge...") accidentally captured during
 * extraction. It is not part of the source file and has been removed.
 */