/* * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu> * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/ #include"event2/event-config.h" #include"evconfig-private.h"
staticint evthread_make_base_notifiable_nolock_(struct event_base *base); staticint event_del_(struct event *ev, int blocking);
#ifdef USE_REINSERT_TIMEOUT /* This code seems buggy; only turn it on if we find out what the trouble is. */ staticvoid event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx); #endif
#ifndef EVENT__DISABLE_DEBUG_MODE /* These functions implement a hashtable of which 'struct event *' structures * have been setup or added. We don't want to trust the content of the struct * event itself, since we're trying to work through cases where an event gets * clobbered or freed. Instead, we keep a hashtable indexed by the pointer.
*/
staticinlineunsigned
hash_debug_entry(conststruct event_debug_entry *e)
{ /* We need to do this silliness to convince compilers that we * honestly mean to cast e->ptr to an integer, and discard any * part of it that doesn't fit in an unsigned.
*/ unsigned u = (unsigned) ((ev_uintptr_t) e->ptr); /* Our hashtable implementation is pretty sensitive to low bits, * and every struct event is over 64 bytes in size, so we can
* just say >>6. */ return (u >> 6);
}
#if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE) /** * @brief debug mode variable which is set for any function/structure that needs * to be shared across threads (if thread support is enabled). * * When and if evthreads are initialized, this variable will be evaluated, * and if set to something other than zero, this means the evthread setup * functions were called out of order. * * See: "Locks and threading" in the documentation.
*/ int event_debug_created_threadable_ctx_ = 0; #endif
/* Set if it's too late to enable event_debug_mode. */ staticint event_debug_mode_too_late = 0; #ifndef EVENT__DISABLE_THREAD_SUPPORT staticvoid *event_debug_map_lock_ = NULL; #endif static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
HT_INITIALIZER();
/* Record that 'ev' has been initialized and is ready for an add.
 * NOTE(review): the bodies of the setup/teardown/add trackers below appear
 * truncated in this copy of the file -- each declares its hashtable probe
 * variables and falls straight through to the "out:" label without the
 * lock/find/insert logic.  TODO: restore from a pristine event.c. */
/* record that ev is now setup (that is, ready for an add) */ staticvoid event_debug_note_setup_(conststruct event *ev)
{ struct event_debug_entry *dent, find;
out:
/* Any use of an event makes it too late to enable debug mode. */
event_debug_mode_too_late = 1;
} /* record that ev is no longer setup */ staticvoid event_debug_note_teardown_(conststruct event *ev)
{ struct event_debug_entry *dent, find;
out:
event_debug_mode_too_late = 1;
} /* Macro: record that ev is now added */ staticvoid event_debug_note_add_(conststruct event *ev)
{ struct event_debug_entry *dent,find;
out:
event_debug_mode_too_late = 1;
} /* record that ev is no longer added */ staticvoid event_debug_note_del_(conststruct event *ev)
{ struct event_debug_entry *dent, find;
/* Debug mode off: nothing to track, but still mark it too late to turn on. */
if (!event_debug_mode_on_) goto out;
find.ptr = ev;
EVLOCK_LOCK(event_debug_map_lock_, 0);
/* Look the event up by pointer; a del on an unknown event is fatal. */
dent = HT_FIND(event_debug_map, &global_debug_map, &find); if (dent) {
dent->added = 0;
} else {
event_errx(EVENT_ERR_ABORT_, "%s: noting a del on a non-setup event %p" " (events: 0x%x, fd: "EV_SOCK_FMT ", flags: 0x%x)",
__func__, ev, ev->ev_events,
EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
}
EVLOCK_UNLOCK(event_debug_map_lock_, 0);
out:
event_debug_mode_too_late = 1;
} /* assert that ev is setup (i.e., okay to add or inspect) */ staticvoid event_debug_assert_is_setup_(conststruct event *ev)
{ struct event_debug_entry *dent, find;
if (!event_debug_mode_on_) return;
find.ptr = ev;
EVLOCK_LOCK(event_debug_map_lock_, 0);
/* Abort if the event was never recorded via event_debug_note_setup_. */
dent = HT_FIND(event_debug_map, &global_debug_map, &find); if (!dent) {
event_errx(EVENT_ERR_ABORT_, "%s called on a non-initialized event %p" " (events: 0x%x, fd: "EV_SOCK_FMT ", flags: 0x%x)",
__func__, ev, ev->ev_events,
EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
}
EVLOCK_UNLOCK(event_debug_map_lock_, 0);
} /* assert that ev is not added (i.e., okay to tear down or set up again) */ staticvoid event_debug_assert_not_added_(conststruct event *ev)
{ struct event_debug_entry *dent, find;
if (!event_debug_mode_on_) return;
find.ptr = ev;
EVLOCK_LOCK(event_debug_map_lock_, 0);
/* Abort if the event is currently added to a base. */
dent = HT_FIND(event_debug_map, &global_debug_map, &find); if (dent && dent->added) {
event_errx(EVENT_ERR_ABORT_, "%s called on an already added event %p" " (events: 0x%x, fd: "EV_SOCK_FMT", " "flags: 0x%x)",
__func__, ev, ev->ev_events,
EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
}
EVLOCK_UNLOCK(event_debug_map_lock_, 0);
} /* Sanity-check a socket handed to the backend.
 * NOTE(review): the body below is truncated in this copy -- only the
 * early-out guards survive; the non-blocking check itself is missing. */ staticvoid event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
{ if (!event_debug_mode_on_) return; if (fd < 0) return;
/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time? Set this to -1 for 'never.' */ #define CLOCK_SYNC_INTERVAL 5
/** Set 'tp' to the current time according to 'base'. We must hold the lock * on 'base'. If there is a cached time, return it. Otherwise, use * clock_gettime or gettimeofday as appropriate to find out the right time. * Return 0 on success, -1 on failure.
*/ staticint
gettime(struct event_base *base, struct timeval *tp)
{
EVENT_BASE_ASSERT_LOCKED(base);
/* Fast path: during callback processing the base caches the time. */
if (base->tv_cache.tv_sec) {
*tp = base->tv_cache; return (0);
}
/* Slow path: read the monotonic clock directly. */
if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) { return -1;
}
/* NOTE(review): the tail of gettime() is missing in this copy -- upstream
 * resynchronizes tv_clock_diff every CLOCK_SYNC_INTERVAL seconds and then
 * returns 0; the closing brace is absent as well.  TODO: restore. */
/* Place in *tv the cached time for 'base' (remapped to the wall clock), or
 * the current time of day when nothing is cached.  Falls back to a plain
 * gettimeofday() when no base is available.  Returns 0 on success. */
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int result;

	if (base == NULL) {
		if (current_base == NULL)
			return evutil_gettimeofday(tv, NULL);
		base = current_base;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec != 0) {
		/* Remap the cached monotonic time into wall-clock time. */
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		result = 0;
	} else {
		result = evutil_gettimeofday(tv, NULL);
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return result;
}
/** Invalidate 'base''s cached time so the next gettime() reads the clock. */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}
/** Refresh the cached time in 'base' with the current time, unless time
 * caching was disabled for this base. */
static inline void
update_time_cache(struct event_base *base)
{
	/* Clear first so gettime() takes its slow path instead of handing
	 * the stale cache back to us. */
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
		gettime(base, &base->tv_cache);
}
int
event_base_update_cache_time(struct event_base *base)
{
if (!base) {
base = current_base; if (!current_base) return -1;
}
EVBASE_ACQUIRE_LOCK(base, th_base_lock); if (base->running_loop)
update_time_cache(base);
EVBASE_RELEASE_LOCK(base, th_base_lock); return 0;
}
/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */ staticint
event_config_is_avoided_method(conststruct event_config *cfg, constchar *method)
{ struct event_config_entry *entry;
/* NOTE(review): the body of this function is missing in this copy --
 * upstream walks cfg->entries comparing each entry's avoid_method against
 * 'method'.  The closing brace is absent as well.  TODO: restore. */
/** Return true iff backend 'name' is disabled according to the environment
 * (i.e., an EVENT_NO<NAME> environment variable is set).
 *
 * Improved in review: the loop's magic starting index 8 (the length of the
 * "EVENT_NO" prefix) is now derived from the prefix itself. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	size_t i;
	/* Only the backend-name suffix needs upper-casing; skip the prefix. */
	const size_t prefix_len = sizeof("EVENT_NO") - 1;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	for (i = prefix_len; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}
int
event_base_get_features(conststruct event_base *base)
{ return base->evsel->features;
}
/* Turn on debug-event tracking.  Must be called before any event or
 * event_base is created.
 * NOTE(review): this function is truncated in this copy, and the lines
 * below that reference 'base', 'cfg' and 'eventops' are clearly a spliced
 * fragment of event_base_new_with_config().  TODO: restore both functions
 * from a pristine event.c. */
void
event_enable_debug_mode(void)
{ #ifndef EVENT__DISABLE_DEBUG_MODE if (event_debug_mode_on_)
event_errx(1, "%s was called twice!", __func__); if (event_debug_mode_too_late)
event_errx(1, "%s must be called *before* creating any events " "or event_bases",__func__);
/* --- begin spliced event_base_new_with_config() fragment --- */
/* Try each compiled-in backend in priority order until one initializes. */
for (i = 0; eventops[i] && !base->evbase; i++) { if (cfg != NULL) { /* determine if this backend should be avoided */ if (event_config_is_avoided_method(cfg,
eventops[i]->name)) continue; if ((eventops[i]->features & cfg->require_features)
!= cfg->require_features) continue;
}
/* also obey the environment variables */ if (should_check_environment &&
event_is_method_disabled(eventops[i]->name)) continue;
base->evsel = eventops[i];
base->evbase = base->evsel->init(base);
}
/* No backend could be initialized: clean up and fail. */
if (base->evbase == NULL) {
event_warnx("%s: no event mechanism available",
__func__);
base->evsel = NULL;
event_base_free(base); return NULL;
}
if (evutil_getenv_("EVENT_SHOW_METHOD"))
event_msgx("libevent using: %s", base->evsel->name);
/* allocate a single active event queue */ if (event_base_priority_init(base, 1) < 0) {
event_base_free(base); return NULL;
}
/* Set up locking and cross-thread notification when threads are enabled
 * and the caller did not ask for a lock-free base. */
#ifndef EVENT__DISABLE_THREAD_SUPPORT if (EVTHREAD_LOCKING_ENABLED() &&
(!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) { int r;
EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
EVTHREAD_ALLOC_COND(base->current_event_cond);
r = evthread_make_base_notifiable(base); if (r<0) {
event_warnx("%s: Unable to make base notifiable.", __func__);
event_base_free(base); return NULL;
}
} #endif
/* Free all memory owned by 'base'.  When 'run_finalizers' is set, pending
 * finalizer callbacks are invoked as events are torn down.
 * NOTE(review): this function is truncated in this copy -- it stops after
 * the "events were still set" debug message with no closing brace; the
 * upstream tail (freeing the active queues, event maps, changelist, and
 * the base itself) is missing.  TODO: restore. */
staticvoid
event_base_free_(struct event_base *base, int run_finalizers)
{ int i, n_deleted=0; struct event *ev; /* XXXX grab the lock? If there is contention when one thread frees
* the base, then the contending thread will be very sad soon. */
/* event_base_free(NULL) is how to free the current_base if we
* made it with event_init and forgot to hold a reference to it. */ if (base == NULL && current_base)
base = current_base; /* Don't actually free NULL. */ if (base == NULL) {
event_warnx("%s: no base to free", __func__); return;
} /* XXX(niels) - check for internal events first */
#ifdef _WIN32
event_base_stop_iocp_(base); #endif
/* threading fds if we have them */ if (base->th_notify_fd[0] != -1) {
event_del(&base->th_notify);
EVUTIL_CLOSESOCKET(base->th_notify_fd[0]); if (base->th_notify_fd[1] != -1)
EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
base->th_notify_fd[0] = -1;
base->th_notify_fd[1] = -1;
event_debug_unassign(&base->th_notify);
}
/* Delete all non-internal events. */
evmap_delete_all_(base);
/* Drain the timeout minheap, deleting each remaining event. */
while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
event_del(ev);
++n_deleted;
} for (i = 0; i < base->n_common_timeouts; ++i) { struct common_timeout_list *ctl =
base->common_timeout_queues[i];
event_del(&ctl->timeout_event); /* Internal; doesn't count */
event_debug_unassign(&ctl->timeout_event); for (ev = TAILQ_FIRST(&ctl->events); ev; ) { struct event *next = TAILQ_NEXT(ev,
ev_timeout_pos.ev_next_with_common_timeout); if (!(ev->ev_flags & EVLIST_INTERNAL)) {
event_del(ev);
++n_deleted;
}
ev = next;
}
mm_free(ctl);
} if (base->common_timeout_queues)
mm_free(base->common_timeout_queues);
/* Repeatedly run the finalizers: a finalizer may register yet another
 * finalizer, so loop until a pass frees nothing. */
for (;;) { /* For finalizers we can register yet another finalizer out from * finalizer, and iff finalizer will be in active_later_queue we can * add finalizer to activequeues, and we will have events in * activequeues after this function returns, which is not what we want * (we even have an assertion for this). * * A simple case is bufferevent with underlying (i.e. filters).
*/ int i = event_base_free_queues_(base, run_finalizers);
event_debug(("%s: %d events freed", __func__, i)); if (!i) { break;
}
n_deleted += i;
}
if (n_deleted)
event_debug(("%s: %d events were still set in base",
__func__, n_deleted));
/* Fake eventop; used to disable the backend temporarily inside event_reinit * so that we can call event_del() on an event without telling the backend.
*/ staticint
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old, short events, void *fdinfo)
{ return 0;
} conststruct eventop nil_eventop = { "nil",
NULL, /* init: unused. */
NULL, /* add: unused. */
nil_backend_del, /* del: used, so needs to be killed. */
NULL, /* dispatch: unused. */
NULL, /* dealloc: unused. */
0, 0, 0
};
/* reinitialize the event base after a fork */ int
event_reinit(struct event_base *base)
{ conststruct eventop *evsel; int res = 0; int was_notifiable = 0; int had_signal_added = 0;
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
evsel = base->evsel;
/* check if this event mechanism requires reinit on the backend */ if (evsel->need_reinit) { /* We're going to call event_del() on our notify events (the * ones that tell about signals and wakeup events). But we * don't actually want to tell the backend to change its * state, since it might still share some resource (a kqueue, * an epoll fd) with the parent process, and we don't want to * delete the fds from _that_ backend, we temporarily stub out * the evsel with a replacement.
*/
base->evsel = &nil_eventop;
}
/* We need to re-create a new signal-notification fd and a new * thread-notification fd. Otherwise, we'll still share those with * the parent process, which would make any notification sent to them * get received by one or both of the event loops, more or less at * random.
*/ if (base->sig.ev_signal_added) {
event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
event_debug_unassign(&base->sig.ev_signal);
memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
had_signal_added = 1;
base->sig.ev_signal_added = 0;
} if (base->sig.ev_signal_pair[0] != -1)
EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]); if (base->sig.ev_signal_pair[1] != -1)
EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]); if (base->th_notify_fn != NULL) {
was_notifiable = 1;
base->th_notify_fn = NULL;
} if (base->th_notify_fd[0] != -1) {
event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
EVUTIL_CLOSESOCKET(base->th_notify_fd[0]); if (base->th_notify_fd[1] != -1)
EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
base->th_notify_fd[0] = -1;
base->th_notify_fd[1] = -1;
event_debug_unassign(&base->th_notify);
}
/* Replace the original evsel. */
base->evsel = evsel;
if (evsel->need_reinit) { /* Reconstruct the backend through brute-force, so that we do * not share any structures with the parent process. For some * backends, this is necessary: epoll and kqueue, for * instance, have events associated with a kernel * structure. If didn't reinitialize, we'd share that * structure with the parent process, and any changes made by * the parent would affect our backend's behavior (and vice * versa).
*/ if (base->evsel->dealloc != NULL)
base->evsel->dealloc(base);
base->evbase = evsel->init(base); if (base->evbase == NULL) {
event_errx(1, "%s: could not reinitialize event mechanism",
__func__);
res = -1; goto done;
}
/* Empty out the changelist (if any): we are starting from a
* blank slate. */
event_changelist_freemem_(&base->changelist);
/* Tell the event maps to re-inform the backend about all * pending events. This will make the signal notification
* event get re-created if necessary. */ if (evmap_reinit_(base) < 0)
res = -1;
} else {
res = evsig_init_(base); if (res == 0 && had_signal_added) {
res = event_add_nolock_(&base->sig.ev_signal, NULL, 0); if (res == 0)
base->sig.ev_signal_added = 1;
}
}
/* If we were notifiable before, and nothing just exploded, become
* notifiable again. */ if (was_notifiable && res == 0)
res = evthread_make_base_notifiable_nolock_(base);
for (i = 0; i < base->nactivequeues; ++i) {
TAILQ_INIT(&base->activequeues[i]);
}
ok:
r = 0;
err:
EVBASE_RELEASE_LOCK(base, th_base_lock); return (r);
}
/* Return how many active-event priority queues 'base' (or the current base
 * when 'base' is NULL) was configured with. */
int
event_base_get_npriorities(struct event_base *base)
{
	int npriorities;

	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	npriorities = base->nactivequeues;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return (npriorities);
}
int
event_base_get_num_events(struct event_base *base, unsignedint type)
{ int r = 0;
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
if (type & EVENT_BASE_COUNT_ACTIVE)
r += base->event_count_active;
if (type & EVENT_BASE_COUNT_VIRTUAL)
r += base->virtual_event_count;
if (type & EVENT_BASE_COUNT_ADDED)
r += base->event_count;
EVBASE_RELEASE_LOCK(base, th_base_lock);
return r;
}
int
event_base_get_max_events(struct event_base *base, unsignedint type, int clear)
{ int r = 0;
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
if (type & EVENT_BASE_COUNT_ACTIVE) {
r += base->event_count_active_max; if (clear)
base->event_count_active_max = 0;
}
if (type & EVENT_BASE_COUNT_VIRTUAL) {
r += base->virtual_event_count_max; if (clear)
base->virtual_event_count_max = 0;
}
if (type & EVENT_BASE_COUNT_ADDED) {
r += base->event_count_max; if (clear)
base->event_count_max = 0;
}
EVBASE_RELEASE_LOCK(base, th_base_lock);
return r;
}
/* Returns true iff we're currently watching any events, real or virtual.
 * Caller must hold th_base_lock. */
static int
event_haveevents(struct event_base *base)
{
	return base->virtual_event_count > 0 || base->event_count > 0;
}
/* "closure" function called when processing active signal events */ staticinlinevoid
event_signal_closure(struct event_base *base, struct event *ev)
{ short ncalls; int should_break;
/* Allows deletes to work */
ncalls = ev->ev_ncalls; if (ncalls != 0)
ev->ev_pncalls = &ncalls;
EVBASE_RELEASE_LOCK(base, th_base_lock); while (ncalls) {
ncalls--;
ev->ev_ncalls = ncalls; if (ncalls == 0)
ev->ev_pncalls = NULL;
(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);
if (should_break) { if (ncalls != 0)
ev->ev_pncalls = NULL; return;
}
}
}
/* Common timeouts are special timeouts that are handled as queues rather than * in the minheap. This is more efficient than the minheap if we happen to * know that we're going to get several thousands of timeout events all with * the same timeout value. * * Since all our timeout handling code assumes timevals can be copied, * assigned, etc, we can't use "magic pointer" to encode these common * timeouts. Searching through a list to see if every timeout is common could * also get inefficient. Instead, we take advantage of the fact that tv_usec * is 32 bits long, but only uses 20 of those bits (since it can never be over * 999999.) We use the top bits to encode 4 bites of magic number, and 8 bits * of index into the event_base's aray of common timeouts.
*/
/** Return true iff if 'tv' is a common timeout in 'base' */ staticinlineint
is_common_timeout(conststruct timeval *tv, conststruct event_base *base)
{ int idx; if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC) return 0;
idx = COMMON_TIMEOUT_IDX(tv); return idx < base->n_common_timeouts;
}
/* True iff tv1 and tv2 have the same common-timeout index, or if neither
* one is a common timeout. */ staticinlineint
is_same_common_timeout(conststruct timeval *tv1, conststruct timeval *tv2)
{ return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
(tv2->tv_usec & ~MICROSECONDS_MASK);
}
/** Requires that 'tv' is a common timeout. Return the corresponding
* common_timeout_list. */ staticinlinestruct common_timeout_list *
get_common_timeout_list(struct event_base *base, conststruct timeval *tv)
{ return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}
/* Add the timeout for the first event in given common timeout list to the
* event_base's minheap. */ staticvoid
common_timeout_schedule(struct common_timeout_list *ctl, conststruct timeval *now, struct event *head)
{ struct timeval timeout = head->ev_timeout;
timeout.tv_usec &= MICROSECONDS_MASK;
event_add_nolock_(&ctl->timeout_event, &timeout, 1);
}
/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
	struct timeval now;
	struct common_timeout_list *ctl = arg;
	struct event_base *base = ctl->base;
	struct event *ev = NULL;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	gettime(base, &now);
	/* Activate every queued event whose deadline has passed.  The queue
	 * is ordered, so stop at the first event still in the future. */
	for (;;) {
		ev = TAILQ_FIRST(&ctl->events);
		if (ev == NULL)
			break;
		if (ev->ev_timeout.tv_sec > now.tv_sec)
			break;
		if (ev->ev_timeout.tv_sec == now.tv_sec &&
		    (ev->ev_timeout.tv_usec & MICROSECONDS_MASK) > now.tv_usec)
			break;
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
	/* If anything is left, re-arm the queue's timer for its new head. */
	if (ev)
		common_timeout_schedule(ctl, &now, ev);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
/* Closure function invoked when we're activating a persistent event. */ staticinlinevoid
event_persist_closure(struct event_base *base, struct event *ev)
{ void (*evcb_callback)(evutil_socket_t, short, void *);
// Other fields of *ev that must be stored before executing
evutil_socket_t evcb_fd; short evcb_res; void *evcb_arg;
/* reschedule the persistent event if we have a timeout. */ if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) { /* If there was a timeout, we want it to run at an interval of * ev_io_timeout after the last time it was _scheduled_ for, * not ev_io_timeout after _now_. If it fired for another
* reason, though, the timeout ought to start ticking _now_. */ struct timeval run_at, relative_to, delay, now;
ev_uint32_t usec_mask = 0;
EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
&ev->ev_io_timeout));
gettime(base, &now); if (is_common_timeout(&ev->ev_timeout, base)) {
/* Common timeout: strip the magic/index bits out of the delay and
 * remember them so the rescheduled time lands back in the same
 * common-timeout queue. */
delay = ev->ev_io_timeout;
usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
delay.tv_usec &= MICROSECONDS_MASK; if (ev->ev_res & EV_TIMEOUT) {
relative_to = ev->ev_timeout;
relative_to.tv_usec &= MICROSECONDS_MASK;
} else {
relative_to = now;
}
} else {
delay = ev->ev_io_timeout; if (ev->ev_res & EV_TIMEOUT) {
relative_to = ev->ev_timeout;
} else {
relative_to = now;
}
}
evutil_timeradd(&relative_to, &delay, &run_at); if (evutil_timercmp(&run_at, &now, <)) { /* Looks like we missed at least one invocation due to * a clock jump, not running the event loop for a * while, really slow callbacks, or * something. Reschedule relative to now.
*/
evutil_timeradd(&now, &delay, &run_at);
}
/* Re-attach the common-timeout magic (a no-op for plain timeouts). */
run_at.tv_usec |= usec_mask;
event_add_nolock_(ev, &run_at, 1);
}
// Save our callback before we release the lock
evcb_callback = ev->ev_callback;
evcb_fd = ev->ev_fd;
evcb_res = ev->ev_res;
evcb_arg = ev->ev_arg;
// Release the lock
EVBASE_RELEASE_LOCK(base, th_base_lock);
// Execute the callback
(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
/* Helper for event_process_active to process all the events in a single queue, releasing the lock as we go. This function requires that the lock be held when it's invoked. Returns -1 if we get a signal or an event_break that means we should stop processing any active events now. Otherwise returns the number of non-internal event_callbacks that we processed.
* NOTE(review): the loop body below is truncated in this copy -- the code
* that removes each callback from the queue and dispatches on its closure
* type is missing, leaving only the break/limit/deadline checks.
* TODO: restore.
*/ staticint
event_process_active_single_queue(struct event_base *base, struct evcallback_list *activeq, int max_to_process, conststruct timeval *endtime)
{ struct event_callback *evcb; int count = 0;
EVUTIL_ASSERT(activeq != NULL);
for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) { struct event *ev=NULL; if (evcb->evcb_flags & EVLIST_INIT) {
ev = event_callback_to_event(evcb);
/* Stop conditions: loopbreak, per-pass callback limit, or the
 * dispatch deadline has passed. */
if (base->event_break) return -1; if (count >= max_to_process) return count; if (count && endtime) { struct timeval now;
update_time_cache(base);
gettime(base, &now); if (evutil_timercmp(&now, endtime, >=)) return count;
} if (base->event_continue) break;
} return count;
}
/*
 * Run callbacks from the active queues, lowest priority number first.
 * Low-numbered (more urgent) priorities can starve higher-numbered ones.
 * Caller must hold th_base_lock.  Returns the number of non-internal
 * callbacks processed, or -1 if the loop should stop now.
 */
static int
event_process_active(struct event_base *base)
{
	struct evcallback_list *queue = NULL;
	int prio, processed = 0;
	const struct timeval *endtime;
	struct timeval deadline;
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;

	if (base->max_dispatch_time.tv_sec >= 0) {
		/* Compute an absolute deadline for this dispatch pass. */
		update_time_cache(base);
		gettime(base, &deadline);
		evutil_timeradd(&base->max_dispatch_time, &deadline, &deadline);
		endtime = &deadline;
	} else {
		endtime = NULL;
	}

	for (prio = 0; prio < base->nactivequeues; ++prio) {
		if (TAILQ_FIRST(&base->activequeues[prio]) == NULL)
			continue;
		base->event_running_priority = prio;
		queue = &base->activequeues[prio];
		if (prio < limit_after_prio)
			processed = event_process_active_single_queue(base,
			    queue, INT_MAX, NULL);
		else
			processed = event_process_active_single_queue(base,
			    queue, maxcb, endtime);
		if (processed < 0)
			goto done;
		if (processed > 0) {
			/* Processed a real event; do not consider
			 * lower-priority events. */
			break;
		}
		/* Everything we processed was internal; keep going. */
	}

done:
	base->event_running_priority = -1;
	return processed;
}
/*
 * Wait continuously for events on the current (global) base.  We exit only
 * if no events are left.
 */
int
event_dispatch(void)
{
	return event_loop(0);
}
/* Run 'event_base''s loop until no events remain (flags == 0). */
int
event_base_dispatch(struct event_base *event_base)
{
	return event_base_loop(event_base, 0);
}
/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;

	base->event_gotterm = 1;
}
/* NOTE(review): orphaned fragment -- this looks like the tail of
 * event_base_loopbreak() (after setting event_break it wakes the loop);
 * the function's opening lines are missing from this copy.  TODO: restore. */
if (EVBASE_NEED_NOTIFY(event_base)) {
/* Another thread runs the loop: poke it awake so it sees the flag. */
r = evthread_notify_base(event_base);
} else {
r = (0);
}
EVBASE_RELEASE_LOCK(event_base, th_base_lock); return r;
}
/* Return (read under the lock) whether event_base_loopbreak() has been
 * called on 'event_base'. */
int
event_base_got_break(struct event_base *event_base)
{
	int broke;

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	broke = event_base->event_break;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return broke;
}
/* Return (read under the lock) whether event_base_loopexit() has been
 * called on 'event_base'. */
int
event_base_got_exit(struct event_base *event_base)
{
	int terminated;

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	terminated = event_base->event_gotterm;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return terminated;
}
/* not thread safe */
/* Run the loop on the global current_base with the given flags. */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}
int
event_base_loop(struct event_base *base, int flags)
{ conststruct eventop *evsel = base->evsel; struct timeval tv; struct timeval *tv_p; int res, done, retval = 0;
/* Grab the lock. We will release it inside evsel.dispatch, and again
* as we invoke user callbacks. */
EVBASE_ACQUIRE_LOCK(base, th_base_lock);
if (base->running_loop) {
event_warnx("%s: reentrant invocation. Only one event_base_loop" " can run on each event_base at once.", __func__);
EVBASE_RELEASE_LOCK(base, th_base_lock); return -1;
}
base->running_loop = 1;
clear_time_cache(base);
if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
evsig_set_base_(base);
while (!done) {
base->event_continue = 0;
base->n_deferreds_queued = 0;
/* Terminate the loop if we have been asked to */ if (base->event_gotterm) { break;
}
if (base->event_break) { break;
}
tv_p = &tv; if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
timeout_next(base, &tv_p);
} else { /* * if we have active events, we just poll new events * without waiting.
*/
evutil_timerclear(&tv);
}
/* If we have no events, we just exit */ if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
event_debug(("%s: no events registered.", __func__));
retval = 1; goto done;
}
event_queue_make_later_events_active(base);
clear_time_cache(base);
res = evsel->dispatch(base, tv_p);
if (res == -1) {
event_debug(("%s: dispatch returned unsuccessfully.",
__func__));
retval = -1; goto done;
}
update_time_cache(base);
timeout_process(base);
if (N_ACTIVE_CALLBACKS(base)) { int n = event_process_active(base); if ((flags & EVLOOP_ONCE)
&& N_ACTIVE_CALLBACKS(base) == 0
&& n != 0)
done = 1;
} elseif (flags & EVLOOP_NONBLOCK)
done = 1;
}
event_debug(("%s: asked to terminate loop.", __func__));
/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage */ staticvoid
event_once_cb(evutil_socket_t fd, short events, void *arg)
{ struct event_once *eonce = arg;
/* NOTE(review): event_once_cb() is truncated here; the lines that follow
 * are spliced fragments of event_base_once() (the immediate-activation
 * check) and of event_assign() (closure selection, minheap init, and the
 * default-priority setup).  TODO: restore all three functions from a
 * pristine event.c. */
if (tv == NULL || ! evutil_timerisset(tv)) { /* If the event is going to become active immediately, * don't put it on the timeout queue. This is one * idiom for scheduling a callback, so let's make
* it fast (and order-preserving). */
activate = 1;
}
} elseif (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
events &= EV_READ|EV_WRITE|EV_CLOSED;
/* Pick the closure type from the requested event kinds. */
if (events & EV_SIGNAL) { if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
event_warnx("%s: EV_SIGNAL is not compatible with " "EV_READ, EV_WRITE or EV_CLOSED", __func__); return -1;
}
ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
} else { if (events & EV_PERSIST) {
evutil_timerclear(&ev->ev_io_timeout);
ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
} else {
ev->ev_closure = EV_CLOSURE_EVENT;
}
}
min_heap_elem_init_(ev);
if (base != NULL) { /* by default, we put new events into the middle priority */
ev->ev_pri = base->nactivequeues / 2;
}
event_debug_note_setup_(ev);
return 0;
}
/* Associate 'ev' with 'base'.
 * NOTE(review): truncated -- only the "already in use" guard survives
 * here; the upstream assignment of ev->ev_base and the default priority,
 * plus the closing brace, are missing.  TODO: restore. */
int
event_base_set(struct event_base *base, struct event *ev)
{ /* Only innocent events may be assigned to a different base */ if (ev->ev_flags != EVLIST_INIT) return (-1);
/* Deallocate 'ev', deleting it from its base first so it cannot fire
 * after being freed. */
void
event_free(struct event *ev)
{ /* This is disabled, so that events which have been finalized be a
* valid target for event_free(). That's */ // event_debug_assert_is_setup_(ev);
/* make sure that this event won't be coming back to haunt us. */
event_del(ev);
event_debug_note_teardown_(ev);
/* NOTE(review): event_free()'s closing brace is missing in this copy. */
mm_free(ev);
/** Internal: Finalize all of the n_cbs callbacks in evcbs. The provided * callback will be invoked on *one of them*, after they have *all* been
* finalized.
* NOTE(review): truncated -- upstream brackets this body with
* EVBASE_ACQUIRE_LOCK/EVBASE_RELEASE_LOCK and returns 0 at the end; the
* tail after the n_pending check is missing here.  TODO: restore. */ int
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
{ int n_pending = 0, i;
/* At most one can be currently executing; the rest we just * cancel... But we always make sure that the finalize callback
* runs. */ for (i = 0; i < n_cbs; ++i) { struct event_callback *evcb = evcbs[i]; if (evcb == base->current_event) {
/* This one is running right now: let it finish, then finalize. */
event_callback_finalize_nolock_(base, 0, evcb, cb);
++n_pending;
} else {
event_callback_cancel_nolock_(base, evcb, 0);
}
}
if (n_pending == 0) { /* Just do the first one. */
event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
}
/* NOTE(review): orphaned fragment -- this is the tail of event_pending();
 * the opening lines (signature, lock acquisition, flag collection) are
 * missing from this copy.  TODO: restore. */
/* See if there is a timeout that we should report */ if (tv != NULL && (flags & event & EV_TIMEOUT)) { struct timeval tmp = ev->ev_timeout;
tmp.tv_usec &= MICROSECONDS_MASK; /* correctly remap to real time */
evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
}
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
return (flags & event);
}
/* Return true iff 'ev' has been initialized (EVLIST_INIT set).
 * NOTE(review): truncated -- the "return 1;" success path and the closing
 * brace are missing in this copy.  TODO: restore. */
int
event_initialized(conststruct event *ev)
{ if (!(ev->ev_flags & EVLIST_INIT)) return 0;
int
event_get_priority(conststruct event *ev)
{
event_debug_assert_is_setup_(ev); return ev->ev_pri;
}
int
event_add(struct event *ev, conststruct timeval *tv)
{ int res;
if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
event_warnx("%s: event has no event_base set.", __func__); return -1;
}
EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
res = event_add_nolock_(ev, tv, 0);
EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
return (res);
}
/* Helper callback: wake an event_base from another thread. This version
 * works by writing a byte to one end of a socketpair, so that the event_base
 * listening on the other end will wake up as the corresponding event
 * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
	char buf[1];
	int written;

	buf[0] = (char)0;
#ifdef _WIN32
	written = send(base->th_notify_fd[1], buf, 1, 0);
#else
	written = write(base->th_notify_fd[1], buf, 1);
#endif
	/* A full pipe (EAGAIN) is fine: a wakeup is already pending. */
	if (written < 0 && !EVUTIL_ERR_IS_EAGAIN(errno))
		return -1;
	return 0;
}
/* NOTE(review): the following German disclaimer is website boilerplate that
 * was appended to this file during extraction and is not part of libevent's
 * event.c.  English translation: "The information on this web page has been
 * carefully compiled to the best of our knowledge.  However, no guarantee is
 * given of the completeness, correctness, or quality of the information
 * provided.  Note: the colored syntax highlighting and the measurement are
 * still experimental." */