/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Common macros and functions for ring benchmarking.
 */
#ifndef MAIN_H
#define MAIN_H
/*
 * Simulate the cost of a guest->host transition (VM exit): when exits
 * are enabled via do_exit, spend VMEXIT_CYCLES in wait_cycles();
 * otherwise do nothing.
 */
static inline void vmexit(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMEXIT_CYCLES);
}

/*
 * Simulate the cost of a host->guest transition (VM entry): when exits
 * are enabled via do_exit, spend VMENTRY_CYCLES in wait_cycles();
 * otherwise do nothing.
 */
static inline void vmentry(void)
{
	if (!do_exit)
		return;

	wait_cycles(VMENTRY_CYCLES);
}
/* implemented by ring */
void alloc_ring(void);

/* guest side */
int add_inbuf(unsigned, void *, void *);
void *get_buf(unsigned *, void **);
void disable_call(void);
bool used_empty(void);
bool enable_call(void);
void kick_available(void);

/* host side */
void disable_kick(void);
bool avail_empty(void);
bool enable_kick(void);
bool use_buf(unsigned *, void **);
void call_used(void);
/* implemented by main */
extern bool do_sleep;
void kick(void);
void wait_for_kick(void);
void call(void);
void wait_for_call(void);

extern unsigned ring_size;
/*
 * Compiler barrier - similar to what Linux uses.
 * __asm__/__volatile__ spellings keep this usable under strict ISO
 * modes (-std=c11), where the bare asm keyword is disabled by gcc.
 */
#define barrier() __asm__ __volatile__("" ::: "memory")
/*
 * CPU relax hint for busy-wait loops.
 * Is there a portable way to do this?
 */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() __asm__ ("rep; nop" ::: "memory")
#elif defined(__s390x__)
#define cpu_relax() barrier()
#elif defined(__aarch64__)
#define cpu_relax() __asm__ ("yield" ::: "memory")
#else
/* unsupported architecture: fail loudly at runtime */
#define cpu_relax() assert(0)
#endif
extern bool do_relax;

/*
 * One iteration of a busy-wait loop: issue a CPU relax hint when
 * do_relax is set, otherwise just a compiler barrier so the compiler
 * cannot optimize the containing loop away.
 */
static inline void busy_wait(void)
{
	if (do_relax)
		cpu_relax();
	else
		/* prevent compiler from removing busy loops */
		barrier();
}
#if defined(__x86_64__) || defined(__i386__)
/*
 * A locked read-modify-write to the stack is a full memory barrier on
 * x86. NOTE(review): the %%rsp operand looks x86_64-specific — confirm
 * the __i386__ case actually assembles there.
 */
#define smp_mb() __asm__ __volatile__("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#elif defined(__aarch64__)
#define smp_mb() __asm__ __volatile__("dmb ish" ::: "memory")
#else
/*
 * Not using __ATOMIC_SEQ_CST since gcc docs say they are only synchronized
 * with other __ATOMIC_SEQ_CST calls.
 */
#define smp_mb() __sync_synchronize()
#endif
/*
 * This abuses the atomic builtins for thread fences, and
 * adds a compiler barrier.
 */
#define smp_release() do { \
	barrier(); \
	__atomic_thread_fence(__ATOMIC_RELEASE); \
} while (0)

#define smp_acquire() do { \
	__atomic_thread_fence(__ATOMIC_ACQUIRE); \
	barrier(); \
} while (0)
/*
 * NOTE(review): the following German website disclaimer appears to be
 * unrelated boilerplate accidentally appended to this header; raw prose
 * breaks compilation, so it is preserved here inside a comment. It should
 * probably be removed entirely — confirm with the file's owner.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen
 * sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
 * noch Richtigkeit, noch Qualität der bereitgestellten Informationen
 * zugesichert.
 *
 * Bemerkung: Die farbliche Syntaxdarstellung und die Messung sind noch
 * experimentell.
 */