/* * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
int
evthread_set_lock_callbacks(conststruct evthread_lock_callbacks *cbs)
{ struct evthread_lock_callbacks *target = evthread_get_lock_callbacks();
#ifndef EVENT__DISABLE_DEBUG_MODE if (event_debug_mode_on_) { if (event_debug_created_threadable_ctx_) {
event_errx(1, "evthread initialization must be called BEFORE anything else!");
}
} #endif
if (!cbs) { if (target->alloc)
event_warnx("Trying to disable lock functions after " "they have been set up will probaby not work.");
memset(target, 0, sizeof(evthread_lock_fns_)); return 0;
} if (target->alloc) { /* Uh oh; we already had locking callbacks set up.*/ if (target->lock_api_version == cbs->lock_api_version &&
target->supported_locktypes == cbs->supported_locktypes &&
target->alloc == cbs->alloc &&
target->free == cbs->free &&
target->lock == cbs->lock &&
target->unlock == cbs->unlock) { /* no change -- allow this. */ return 0;
}
event_warnx("Can't change lock callbacks once they have been " "initialized."); return -1;
} if (cbs->alloc && cbs->free && cbs->lock && cbs->unlock) {
memcpy(target, cbs, sizeof(evthread_lock_fns_)); return event_global_setup_locks_(1);
} else { return -1;
}
}
int
evthread_set_condition_callbacks(conststruct evthread_condition_callbacks *cbs)
{ struct evthread_condition_callbacks *target = evthread_get_condition_callbacks();
#ifndef EVENT__DISABLE_DEBUG_MODE if (event_debug_mode_on_) { if (event_debug_created_threadable_ctx_) {
event_errx(1, "evthread initialization must be called BEFORE anything else!");
}
} #endif
if (!cbs) { if (target->alloc_condition)
event_warnx("Trying to disable condition functions " "after they have been set up will probaby not " "work.");
memset(target, 0, sizeof(evthread_cond_fns_)); return 0;
} if (target->alloc_condition) { /* Uh oh; we already had condition callbacks set up.*/ if (target->condition_api_version == cbs->condition_api_version &&
target->alloc_condition == cbs->alloc_condition &&
target->free_condition == cbs->free_condition &&
target->signal_condition == cbs->signal_condition &&
target->wait_condition == cbs->wait_condition) { /* no change -- allow this. */ return 0;
}
event_warnx("Can't change condition callbacks once they " "have been initialized."); return -1;
} if (cbs->alloc_condition && cbs->free_condition &&
cbs->signal_condition && cbs->wait_condition) {
memcpy(target, cbs, sizeof(evthread_cond_fns_));
} if (evthread_lock_debugging_enabled_) {
evthread_cond_fns_.alloc_condition = cbs->alloc_condition;
evthread_cond_fns_.free_condition = cbs->free_condition;
evthread_cond_fns_.signal_condition = cbs->signal_condition;
} return 0;
}
/* Magic value stamped into every debug lock so misuse (freeing or locking
 * a pointer that is not a debug lock) can be detected. */
#define DEBUG_LOCK_SIG	0xdeb0b10c

/* Wrapper placed around a real lock when lock debugging is enabled. */
struct debug_lock {
	unsigned signature;	/* always DEBUG_LOCK_SIG for a valid lock */
	unsigned locktype;	/* EVTHREAD_LOCKTYPE_* flags used at alloc */
	unsigned long held_by;	/* id of the thread currently holding it */
	/* XXXX if we ever use read-write locks, we will need a separate
	 * lock to protect count. */
	int count;		/* recursion depth; >0 while held */
	void *lock;		/* underlying real lock, or NULL */
};
void *
evthread_setup_global_lock_(void *lock_, unsigned locktype, int enable_locks)
{ /* there are four cases here: 1) we're turning on debugging; locking is not on. 2) we're turning on debugging; locking is on. 3) we're turning on locking; debugging is not on.
4) we're turning on locking; debugging is on. */
if (!enable_locks && original_lock_fns_.alloc == NULL) { /* Case 1: allocate a debug lock. */
EVUTIL_ASSERT(lock_ == NULL); return debug_lock_alloc(locktype);
} elseif (!enable_locks && original_lock_fns_.alloc != NULL) { /* Case 2: wrap the lock in a debug lock. */ struct debug_lock *lock;
EVUTIL_ASSERT(lock_ != NULL);
if (!(locktype & EVTHREAD_LOCKTYPE_RECURSIVE)) { /* We can't wrap it: We need a recursive lock */
original_lock_fns_.free(lock_, locktype); return debug_lock_alloc(locktype);
}
lock = mm_malloc(sizeof(struct debug_lock)); if (!lock) {
original_lock_fns_.free(lock_, locktype); return NULL;
}
lock->lock = lock_;
lock->locktype = locktype;
lock->count = 0;
lock->held_by = 0; return lock;
} elseif (enable_locks && ! evthread_lock_debugging_enabled_) { /* Case 3: allocate a regular lock */
EVUTIL_ASSERT(lock_ == NULL); return evthread_lock_fns_.alloc(locktype);
} else { /* Case 4: Fill in a debug lock with a real lock */ struct debug_lock *lock = lock_ ? lock_ : debug_lock_alloc(locktype);
EVUTIL_ASSERT(enable_locks &&
evthread_lock_debugging_enabled_);
EVUTIL_ASSERT(lock->locktype == locktype); if (!lock->lock) {
lock->lock = original_lock_fns_.alloc(
locktype|EVTHREAD_LOCKTYPE_RECURSIVE); if (!lock->lock) {
lock->count = -200;
mm_free(lock); return NULL;
}
} return lock;
}
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.