/* * Copyright (c) 2002-2007 Niels Provos <provos@citi.umich.edu> * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef EVENT__HAVE_VASPRINTF /* If we have vasprintf, we need to define _GNU_SOURCE before we include * stdio.h. This comes from evconfig-private.h.
*/ #endif
if (size > EVBUFFER_CHAIN_MAX - EVBUFFER_CHAIN_SIZE) return (NULL);
size += EVBUFFER_CHAIN_SIZE;
/* get the next largest memory that can hold the buffer */ if (size < EVBUFFER_CHAIN_MAX / 2) {
to_alloc = MIN_BUFFER_SIZE; while (to_alloc < size) {
to_alloc <<= 1;
}
} else {
to_alloc = size;
}
/* we get everything in one chunk */ if ((chain = mm_malloc(to_alloc)) == NULL) return (NULL);
/* this way we can manipulate the buffer to different addresses, * which is required for mmap for example.
*/
chain->buffer = EVBUFFER_CHAIN_EXTRA(unsignedchar, chain);
chain->refcnt = 1;
return (chain);
}
/* Release one reference to 'chain'.  When the last reference is dropped,
 * run any type-specific cleanup (reference callbacks, file segments,
 * multicast parents) and free the chain's memory.  Pinned chains are
 * marked dangling instead of being freed. */
static inline void
evbuffer_chain_free(struct evbuffer_chain *chain)
{
	EVUTIL_ASSERT(chain->refcnt > 0);
	if (--chain->refcnt > 0) {
		/* chain is still referenced by other chains */
		return;
	}

	if (CHAIN_PINNED(chain)) {
		/* will get freed once no longer dangling */
		chain->refcnt++;
		chain->flags |= EVBUFFER_DANGLING;
		return;
	}

	/* safe to release chain, it's either a referencing
	 * chain or all references to it have been freed */
	if (chain->flags & EVBUFFER_REFERENCE) {
		struct evbuffer_chain_reference *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_reference,
			    chain);
		if (info->cleanupfn)
			(*info->cleanupfn)(chain->buffer,
			    chain->buffer_len,
			    info->extra);
	}
	if (chain->flags & EVBUFFER_FILESEGMENT) {
		struct evbuffer_chain_file_segment *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_chain_file_segment,
			    chain);
		if (info->segment) {
#ifdef _WIN32
			if (info->segment->is_mapping)
				UnmapViewOfFile(chain->buffer);
#endif
			evbuffer_file_segment_free(info->segment);
		}
	}
	if (chain->flags & EVBUFFER_MULTICAST) {
		struct evbuffer_multicast_parent *info =
		    EVBUFFER_CHAIN_EXTRA(
			    struct evbuffer_multicast_parent,
			    chain);
		/* referencing chain is being freed, decrease
		 * refcounts of source chain and associated
		 * evbuffer (which get freed once both reach
		 * zero) */
		EVUTIL_ASSERT(info->source != NULL);
		EVUTIL_ASSERT(info->parent != NULL);
		EVBUFFER_LOCK(info->source);
		evbuffer_chain_free(info->parent);
		evbuffer_decref_and_unlock_(info->source);
	}

	/* NOTE(review): the mm_free and closing brace were lost in the
	 * mangled source; restored per upstream libevent so the chain's
	 * memory is actually released. */
	mm_free(chain);
}
#ifndef NDEBUG
/* Return 1 iff no chain in the list starting at 'chain' holds any data.
 * Used only inside assertions. */
static int
evbuffer_chains_all_empty(struct evbuffer_chain *chain)
{
	for (; chain; chain = chain->next) {
		if (chain->off)
			return 0;
	}
	return 1;
}
#else
/* The definition is needed for EVUTIL_ASSERT, which uses sizeof to avoid
   "unused variable" warnings. */
static inline int evbuffer_chains_all_empty(struct evbuffer_chain *chain) {
	return 1;
}
#endif
/* Free all trailing chains in 'buf' that are neither pinned nor empty, prior * to replacing them all with a new chain. Return a pointer to the place * where the new chain will go. * * Internal; requires lock. The caller must fix up buf->last and buf->first * as needed; they might have been freed.
*/ staticstruct evbuffer_chain **
evbuffer_free_trailing_empty_chains(struct evbuffer *buf)
{ struct evbuffer_chain **ch = buf->last_with_datap; /* Find the first victim chain. It might be *last_with_datap */ while ((*ch) && ((*ch)->off != 0 || CHAIN_PINNED(*ch)))
ch = &(*ch)->next; if (*ch) {
EVUTIL_ASSERT(evbuffer_chains_all_empty(*ch));
evbuffer_free_all_chains(*ch);
*ch = NULL;
} return ch;
}
/* Add a single chain 'chain' to the end of 'buf', freeing trailing empty
 * chains as necessary.  Requires lock.  Does not schedule callbacks.
 */
static void
evbuffer_chain_insert(struct evbuffer *buf,
    struct evbuffer_chain *chain)
{
	ASSERT_EVBUFFER_LOCKED(buf);
	if (*buf->last_with_datap == NULL) {
		/* There are no chains data on the buffer at all. */
		EVUTIL_ASSERT(buf->last_with_datap == &buf->first);
		EVUTIL_ASSERT(buf->first == NULL);
		buf->first = buf->last = chain;
	} else {
		struct evbuffer_chain **chp;
		chp = evbuffer_free_trailing_empty_chains(buf);
		*chp = chain;
		/* Only chains holding data may become last_with_datap. */
		if (chain->off)
			buf->last_with_datap = chp;
		buf->last = chain;
	}
	buf->total_len += chain->off;
}
if (buffer->deferred_cbs) { if (event_deferred_cb_schedule_(buffer->cb_queue, &buffer->deferred)) {
evbuffer_incref_and_lock_(buffer); if (buffer->parent)
bufferevent_incref_(buffer->parent);
EVBUFFER_UNLOCK(buffer);
}
}
/* XXXX It would be better to run these callbacks without holding the
* lock */
EVBUFFER_LOCK(buffer);
parent = buffer->parent;
evbuffer_run_callbacks(buffer, 1);
evbuffer_decref_and_unlock_(buffer); if (parent)
bufferevent_decref_(parent);
}
/* Append the contents of every iovec in 'vec' to 'buf'.  Returns the
 * number of bytes successfully appended (0 if the initial expansion or
 * any individual add fails partway). */
size_t
evbuffer_add_iovec(struct evbuffer * buf, struct evbuffer_iovec * vec, int n_vec)
{
	int i;
	size_t added = 0;
	size_t needed = 0;

	EVBUFFER_LOCK(buf);

	for (i = 0; i < n_vec; i++)
		needed += vec[i].iov_len;

	if (evbuffer_expand_fast_(buf, needed, 2) < 0)
		goto done;

	for (i = 0; i < n_vec; i++) {
		/* XXX each 'add' call here does a bunch of setup that's
		 * obviated by evbuffer_expand_fast_, and some cleanup that we
		 * would like to do only once.  Instead we should just extract
		 * the part of the code that's needed. */
		if (evbuffer_add(buf, vec[i].iov_base, vec[i].iov_len) < 0)
			goto done;
		added += vec[i].iov_len;
	}

done:
	EVBUFFER_UNLOCK(buf);
	return added;
}
int
evbuffer_reserve_space(struct evbuffer *buf, ev_ssize_t size, struct evbuffer_iovec *vec, int n_vecs)
{ struct evbuffer_chain *chain, **chainp; int n = -1;
EVBUFFER_LOCK(buf); if (buf->freeze_end) goto done; if (n_vecs < 1) goto done; if (n_vecs == 1) { if ((chain = evbuffer_expand_singlechain(buf, size)) == NULL) goto done;
/* Walk forward from buf->last_with_datap, moving it to the last chain
 * that actually holds data.  Returns the number of chains traversed.
 * Requires lock. */
static int
advance_last_with_data(struct evbuffer *buf)
{
	int n = 0;
	struct evbuffer_chain **chainp = buf->last_with_datap;

	ASSERT_EVBUFFER_LOCKED(buf);

	if (!*chainp)
		return 0;

	while ((*chainp)->next) {
		chainp = &(*chainp)->next;
		if ((*chainp)->off)
			buf->last_with_datap = chainp;
		++n;
	}
	return n;
}
int
evbuffer_commit_space(struct evbuffer *buf, struct evbuffer_iovec *vec, int n_vecs)
{ struct evbuffer_chain *chain, **firstchainp, **chainp; int result = -1;
size_t added = 0; int i;
EVBUFFER_LOCK(buf);
if (buf->freeze_end) goto done; if (n_vecs == 0) {
result = 0; goto done;
} elseif (n_vecs == 1 &&
(buf->last && vec[0].iov_base == (void *)CHAIN_SPACE_PTR(buf->last))) { /* The user only got or used one chain; it might not
* be the first one with space in it. */ if ((size_t)vec[0].iov_len > (size_t)CHAIN_SPACE_LEN(buf->last)) goto done;
buf->last->off += vec[0].iov_len;
added = vec[0].iov_len; if (added)
advance_last_with_data(buf); goto okay;
}
/* Advance 'firstchain' to the first chain with space in it. */
firstchainp = buf->last_with_datap; if (!*firstchainp) goto done; if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
firstchainp = &(*firstchainp)->next;
}
chain = *firstchainp; /* pass 1: make sure that the pointers and lengths of vecs[] are in
* bounds before we try to commit anything. */ for (i=0; i<n_vecs; ++i) { if (!chain) goto done; if (vec[i].iov_base != (void *)CHAIN_SPACE_PTR(chain) ||
(size_t)vec[i].iov_len > CHAIN_SPACE_LEN(chain)) goto done;
chain = chain->next;
} /* pass 2: actually adjust all the chains. */
chainp = firstchainp; for (i=0; i<n_vecs; ++i) {
(*chainp)->off += vec[i].iov_len;
added += vec[i].iov_len; if (vec[i].iov_len) {
buf->last_with_datap = chainp;
}
chainp = &(*chainp)->next;
}
/* Prepares the contents of src to be moved to another buffer by removing * read-pinned chains. The first pinned chain is saved in first, and the * last in last. If src has no read-pinned chains, first and last are set
* to NULL. */ staticint
PRESERVE_PINNED(struct evbuffer *src, struct evbuffer_chain **first, struct evbuffer_chain **last)
{ struct evbuffer_chain *chain, **pinned;
/* If there's data in the first pinned chain, we need to allocate
* a new chain and copy the data over. */ if (chain->off) { struct evbuffer_chain *tmp;
tmp = evbuffer_chain_new(sizeof(struct evbuffer_multicast_parent)); if (!tmp) {
event_warn("%s: out of memory", __func__); return;
}
extra = EVBUFFER_CHAIN_EXTRA(struct evbuffer_multicast_parent, tmp); /* reference evbuffer containing source chain so it * doesn't get released while the chain is still
* being referenced to */
evbuffer_incref_(src);
extra->source = src; /* reference source chain which now becomes immutable */
evbuffer_chain_incref(chain);
extra->parent = chain;
chain->flags |= EVBUFFER_IMMUTABLE;
tmp->buffer_len = chain->buffer_len;
tmp->misalign = chain->misalign;
tmp->off = chain->off;
tmp->flags |= EVBUFFER_MULTICAST|EVBUFFER_IMMUTABLE;
tmp->buffer = chain->buffer;
evbuffer_chain_insert(dst, tmp);
}
}
if (in_total_len == 0 || outbuf == inbuf) goto done;
if (outbuf->freeze_end || inbuf->freeze_start) {
result = -1; goto done;
}
if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
result = -1; goto done;
}
if (out_total_len == 0) { /* There might be an empty chain at the start of outbuf; free
* it. */
evbuffer_free_all_chains(outbuf->first);
COPY_CHAIN(outbuf, inbuf);
} else {
APPEND_CHAIN(outbuf, inbuf);
}
if (outbuf->freeze_end || outbuf == inbuf) {
result = -1; goto done;
}
for (; chain; chain = chain->next) { if ((chain->flags & (EVBUFFER_FILESEGMENT|EVBUFFER_SENDFILE|EVBUFFER_MULTICAST)) != 0) { /* chain type can not be referenced */
result = -1; goto done;
}
}
if (out_total_len == 0) { /* There might be an empty chain at the start of outbuf; free
* it. */
evbuffer_free_all_chains(outbuf->first);
}
APPEND_CHAIN_MULTICAST(outbuf, inbuf);
if (outbuf->freeze_start || inbuf->freeze_start) {
result = -1; goto done;
}
if (PRESERVE_PINNED(inbuf, &pinned, &last) < 0) {
result = -1; goto done;
}
if (out_total_len == 0) { /* There might be an empty chain at the start of outbuf; free
* it. */
evbuffer_free_all_chains(outbuf->first);
COPY_CHAIN(outbuf, inbuf);
} else {
PREPEND_CHAIN(outbuf, inbuf);
}
buf->n_del_for_cb += len; /* Tell someone about changes in this buffer */
evbuffer_invoke_callbacks_(buf);
done:
EVBUFFER_UNLOCK(buf); return result;
}
/* Reads data from an event buffer and drains the bytes read */
int
evbuffer_remove(struct evbuffer *buf, void *data_out, size_t datlen)
{
	ev_ssize_t copied;

	EVBUFFER_LOCK(buf);
	copied = evbuffer_copyout_from(buf, NULL, data_out, datlen);
	/* Only drain what was actually copied out; a failed drain turns
	 * the whole call into an error. */
	if (copied > 0 && evbuffer_drain(buf, copied) < 0)
		copied = -1;
	EVBUFFER_UNLOCK(buf);

	return (int)copied;
}
result = nread;
done:
EVBUFFER_UNLOCK(buf); return result;
}
/* reads data from the src buffer to the dst buffer, avoids memcpy as
* possible. */ /* XXXX should return ev_ssize_t */ int
evbuffer_remove_buffer(struct evbuffer *src, struct evbuffer *dst,
size_t datlen)
{ /*XXX We should have an option to force this to be zero-copy.*/
/*XXX can fail badly on sendfile case. */ struct evbuffer_chain *chain, *previous;
size_t nread = 0; int result;
EVBUFFER_LOCK2(src, dst);
chain = previous = src->first;
if (datlen == 0 || dst == src) {
result = 0; goto done;
}
if (dst->freeze_end || src->freeze_start) {
result = -1; goto done;
}
/* short-cut if there is no more data buffered */ if (datlen >= src->total_len) {
datlen = src->total_len;
evbuffer_add_buffer(dst, src);
result = (int)datlen; /*XXXX should return ev_ssize_t*/ goto done;
}
/* removes chains if possible */ while (chain->off <= datlen) { /* We can't remove the last with data from src unless we * remove all chains, in which case we would have done the if
* block above */
EVUTIL_ASSERT(chain != *src->last_with_datap);
nread += chain->off;
datlen -= chain->off;
previous = chain; if (src->last_with_datap == &chain->next)
src->last_with_datap = &src->first;
chain = chain->next;
}
if (chain != src->first) { /* we can remove the chain */ struct evbuffer_chain **chp;
chp = evbuffer_free_trailing_empty_chains(dst);
/* we know that there is more data in the src buffer than
* we want to read, so we manually drain the chain */
evbuffer_add(dst, chain->buffer + chain->misalign, datlen);
chain->misalign += datlen;
chain->off -= datlen;
nread += datlen;
/* You might think we would want to increment dst->n_add_for_cb * here too. But evbuffer_add above already took care of that.
*/
src->total_len -= nread;
src->n_del_for_cb += nread;
if (nread) {
evbuffer_invoke_callbacks_(dst);
evbuffer_invoke_callbacks_(src);
}
result = (int)nread;/*XXXX should change return type */
if (size < 0)
size = buf->total_len; /* if size > buf->total_len, we cannot guarantee to the user that she * is going to have a long enough buffer afterwards; so we return
* NULL */ if (size == 0 || (size_t)size > buf->total_len) goto done;
/* No need to pull up anything; the first size bytes are
* already here. */ if (chain->off >= (size_t)size) {
result = chain->buffer + chain->misalign; goto done;
}
/* Make sure that none of the chains we need to copy from is pinned. */
remaining = size - chain->off;
EVUTIL_ASSERT(remaining >= 0); for (tmp=chain->next; tmp; tmp=tmp->next) { if (CHAIN_PINNED(tmp)) goto done; if (tmp->off >= (size_t)remaining) break;
remaining -= tmp->off;
}
if (CHAIN_PINNED(chain)) {
size_t old_off = chain->off; if (CHAIN_SPACE_LEN(chain) < size - chain->off) { /* not enough room at end of chunk. */ goto done;
}
buffer = CHAIN_SPACE_PTR(chain);
tmp = chain;
tmp->off = size;
size -= old_off;
chain = chain->next;
} elseif (chain->buffer_len - chain->misalign >= (size_t)size) { /* already have enough space in the first chain */
size_t old_off = chain->off;
buffer = chain->buffer + chain->misalign + chain->off;
tmp = chain;
tmp->off = size;
size -= old_off;
chain = chain->next;
} else { if ((tmp = evbuffer_chain_new(size)) == NULL) {
event_warn("%s: out of memory", __func__); goto done;
}
buffer = tmp->buffer;
tmp->off = size;
buf->first = tmp;
}
/* TODO(niels): deal with buffers that point to NULL like sendfile */
/* Copy and free every chunk that will be entirely pulled into tmp */
last_with_data = *buf->last_with_datap; for (; chain != NULL && (size_t)size >= chain->off; chain = next) {
next = chain->next;
if (chain->buffer) {
memcpy(buffer, chain->buffer + chain->misalign, chain->off);
size -= chain->off;
buffer += chain->off;
} if (chain == last_with_data)
removed_last_with_data = 1; if (&chain->next == buf->last_with_datap)
removed_last_with_datap = 1;
/*
 * Reads a line terminated by either '\r\n', '\n\r', '\r', or '\n'.
 * The returned buffer needs to be freed by the caller.
 */ char *
evbuffer_readline(struct evbuffer *buffer)
{ return evbuffer_readln(buffer, NULL, EVBUFFER_EOL_ANY);
}
/* Return a pointer to the first '\r' or '\n' in the first 'len' bytes of
 * 's', or NULL if neither occurs.  When both appear, the earlier one is
 * returned. */
static inline char *
find_eol_char(char *s, size_t len)
{
#define CHUNK_SZ 128
	/* Lots of benchmarking found this approach to be faster in practice
	 * than doing two memchrs over the whole buffer, doing a memchr on
	 * each char of the buffer, or trying to emulate memchr by hand. */
	char *s_end, *cr, *lf;
	s_end = s+len;
	while (s < s_end) {
		size_t chunk = (s + CHUNK_SZ < s_end) ? CHUNK_SZ : (s_end - s);
		cr = memchr(s, '\r', chunk);
		lf = memchr(s, '\n', chunk);
		if (cr) {
			if (lf && lf < cr)
				return lf;
			return cr;
		} else if (lf) {
			return lf;
		}
		s += CHUNK_SZ;
	}

	/* Restored: the truncated source fell off the end of this non-void
	 * function, which is undefined behavior. */
	return NULL;
#undef CHUNK_SZ
}
/* the eol_style determines our first stop character and how many
* characters we are going to drain afterwards. */ switch (eol_style) { case EVBUFFER_EOL_ANY: if (evbuffer_find_eol_char(&it) < 0) goto done;
memcpy(&it2, &it, sizeof(it));
extra_drain = evbuffer_strspn(&it2, "\r\n"); break; case EVBUFFER_EOL_CRLF_STRICT: {
it = evbuffer_search(buffer, "\r\n", 2, &it); if (it.pos < 0) goto done;
extra_drain = 2; break;
} case EVBUFFER_EOL_CRLF: {
ev_ssize_t start_pos = it.pos; /* Look for a LF ... */ if (evbuffer_strchr(&it, '\n') < 0) goto done;
extra_drain = 1; /* ... optionally preceeded by a CR. */ if (it.pos == start_pos) break; /* If the first character is \n, don't back up */ /* This potentially does an extra linear walk over the first * few chains. Probably, that's not too expensive unless you
* have a really pathological setup. */
memcpy(&it2, &it, sizeof(it)); if (evbuffer_ptr_subtract(buffer, &it2, 1)<0) break; if (evbuffer_getchr(&it2) == '\r') {
memcpy(&it, &it2, sizeof(it));
extra_drain = 2;
} break;
} case EVBUFFER_EOL_LF: if (evbuffer_strchr(&it, '\n') < 0) goto done;
extra_drain = 1; break; case EVBUFFER_EOL_NUL: if (evbuffer_strchr(&it, '\0') < 0) goto done;
extra_drain = 1; break; default: goto done;
}
ok = 1;
done:
EVBUFFER_UNLOCK(buffer);
if (!ok)
PTR_NOT_FOUND(&it); if (eol_len_out)
*eol_len_out = extra_drain;
/* If there are no chains allocated for this buffer, allocate one
* big enough to hold all the data. */ if (chain == NULL) {
chain = evbuffer_chain_new(datlen); if (!chain) goto done;
evbuffer_chain_insert(buf, chain);
}
if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { /* Always true for mutable buffers */
EVUTIL_ASSERT(chain->misalign >= 0 &&
(ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
remain = chain->buffer_len - (size_t)chain->misalign - chain->off; if (remain >= datlen) { /* there's enough space to hold all the data in the
* current last chain */
memcpy(chain->buffer + chain->misalign + chain->off,
data, datlen);
chain->off += datlen;
buf->total_len += datlen;
buf->n_add_for_cb += datlen; goto out;
} elseif (!CHAIN_PINNED(chain) &&
evbuffer_chain_should_realign(chain, datlen)) { /* we can fit the data into the misalignment */
evbuffer_chain_align(chain);
memcpy(chain->buffer + chain->off, data, datlen);
chain->off += datlen;
buf->total_len += datlen;
buf->n_add_for_cb += datlen; goto out;
}
} else { /* we cannot write any data to the last chain */
remain = 0;
}
/* we need to add another chain */
to_alloc = chain->buffer_len; if (to_alloc <= EVBUFFER_CHAIN_MAX_AUTO_SIZE/2)
to_alloc <<= 1; if (datlen > to_alloc)
to_alloc = datlen;
tmp = evbuffer_chain_new(to_alloc); if (tmp == NULL) goto done;
out:
evbuffer_invoke_callbacks_(buf);
result = 0;
done:
EVBUFFER_UNLOCK(buf); return result;
}
int
evbuffer_prepend(struct evbuffer *buf, constvoid *data, size_t datlen)
{ struct evbuffer_chain *chain, *tmp; int result = -1;
EVBUFFER_LOCK(buf);
if (datlen == 0) {
result = 0; goto done;
} if (buf->freeze_start) { goto done;
} if (datlen > EV_SIZE_MAX - buf->total_len) { goto done;
}
chain = buf->first;
if (chain == NULL) {
chain = evbuffer_chain_new(datlen); if (!chain) goto done;
evbuffer_chain_insert(buf, chain);
}
/* we cannot touch immutable buffers */ if ((chain->flags & EVBUFFER_IMMUTABLE) == 0) { /* Always true for mutable buffers */
EVUTIL_ASSERT(chain->misalign >= 0 &&
(ev_uint64_t)chain->misalign <= EVBUFFER_CHAIN_MAX);
/* If this chain is empty, we can treat it as
* 'empty at the beginning' rather than 'empty at the end' */ if (chain->off == 0)
chain->misalign = chain->buffer_len;
if ((size_t)chain->misalign >= datlen) { /* we have enough space to fit everything */
memcpy(chain->buffer + chain->misalign - datlen,
data, datlen);
chain->off += datlen;
chain->misalign -= datlen;
buf->total_len += datlen;
buf->n_add_for_cb += datlen; goto out;
} elseif (chain->misalign) { /* we can only fit some of the data. */
memcpy(chain->buffer,
(char*)data + datlen - chain->misalign,
(size_t)chain->misalign);
chain->off += (size_t)chain->misalign;
buf->total_len += (size_t)chain->misalign;
buf->n_add_for_cb += (size_t)chain->misalign;
datlen -= (size_t)chain->misalign;
chain->misalign = 0;
}
}
/* we need to add another chain */ if ((tmp = evbuffer_chain_new(datlen)) == NULL) goto done;
buf->first = tmp; if (buf->last_with_datap == &buf->first && chain->off)
buf->last_with_datap = &tmp->next;
/** Helper: return true iff we should realign chain to fit datalen bytes of
    data in it.  Realigning is worthwhile only if the remaining capacity is
    big enough, and the amount of data to shift is small. */
static int
evbuffer_chain_should_realign(struct evbuffer_chain *chain,
    size_t datlen)
{
	return chain->buffer_len - chain->off >= datlen &&
	    (chain->off < chain->buffer_len / 2) &&
	    (chain->off <= MAX_TO_REALIGN_IN_EXPAND);
}
/* Expands the available space in the event buffer to at least datlen, all in
* a single chunk. Return that chunk. */ staticstruct evbuffer_chain *
evbuffer_expand_singlechain(struct evbuffer *buf, size_t datlen)
{ struct evbuffer_chain *chain, **chainp; struct evbuffer_chain *result = NULL;
ASSERT_EVBUFFER_LOCKED(buf);
chainp = buf->last_with_datap;
/* XXX If *chainp is no longer writeable, but has enough space in its * misalign, this might be a bad idea: we could still use *chainp, not
* (*chainp)->next. */ if (*chainp && CHAIN_SPACE_LEN(*chainp) == 0)
chainp = &(*chainp)->next;
/* 'chain' now points to the first chain with writable space (if any)
* We will either use it, realign it, replace it, or resize it. */
chain = *chainp;
if (chain == NULL ||
(chain->flags & (EVBUFFER_IMMUTABLE|EVBUFFER_MEM_PINNED_ANY))) { /* We can't use the last_with_data chain at all. Just add a
* new one that's big enough. */ goto insert_new;
}
/* If we can fit all the data, then we don't have to do anything */ if (CHAIN_SPACE_LEN(chain) >= datlen) {
result = chain; goto ok;
}
/* If the chain is completely empty, just replace it by adding a new
* empty chain. */ if (chain->off == 0) { goto insert_new;
}
/* If the misalignment plus the remaining space fulfills our data * needs, we could just force an alignment to happen. Afterwards, we * have enough space. But only do this if we're saving a lot of space * and not moving too much data. Otherwise the space savings are * probably offset by the time lost in copying.
*/ if (evbuffer_chain_should_realign(chain, datlen)) {
evbuffer_chain_align(chain);
result = chain; goto ok;
}
/* At this point, we can either resize the last chunk with space in * it, use the next chunk after it, or If we add a new chunk, we waste * CHAIN_SPACE_LEN(chain) bytes in the former last chunk. If we * resize, we have to copy chain->off bytes.
*/
/* Would expanding this chunk be affordable and worthwhile? */ if (CHAIN_SPACE_LEN(chain) < chain->buffer_len / 8 ||
chain->off > MAX_TO_COPY_IN_EXPAND ||
datlen >= (EVBUFFER_CHAIN_MAX - chain->off)) { /* It's not worth resizing this chain. Can the next one be
* used? */ if (chain->next && CHAIN_SPACE_LEN(chain->next) >= datlen) { /* Yes, we can just use the next chain (which should
* be empty. */
result = chain->next; goto ok;
} else { /* No; append a new chain (which will free all
* terminal empty chains.) */ goto insert_new;
}
} else { /* Okay, we're going to try to resize this chain: Not doing so * would waste at least 1/8 of its current allocation, and we * can do so without having to copy more than
* MAX_TO_COPY_IN_EXPAND bytes. */ /* figure out how much space we need */
size_t length = chain->off + datlen; struct evbuffer_chain *tmp = evbuffer_chain_new(length); if (tmp == NULL) goto err;
/* copy the data over that we had so far */
tmp->off = chain->off;
memcpy(tmp->buffer, chain->buffer + chain->misalign,
chain->off); /* fix up the list */
EVUTIL_ASSERT(*chainp == chain);
result = *chainp = tmp;
insert_new:
result = evbuffer_chain_insert_new(buf, datlen); if (!result) goto err;
ok:
EVUTIL_ASSERT(result);
EVUTIL_ASSERT(CHAIN_SPACE_LEN(result) >= datlen);
err: return result;
}
/* Make sure that datlen bytes are available for writing in the last n
* chains. Never copies or moves data. */ int
evbuffer_expand_fast_(struct evbuffer *buf, size_t datlen, int n)
{ struct evbuffer_chain *chain = buf->last, *tmp, *next;
size_t avail; int used;
if (chain == NULL || (chain->flags & EVBUFFER_IMMUTABLE)) { /* There is no last chunk, or we can't touch the last chunk.
* Just add a new chunk. */
chain = evbuffer_chain_new(datlen); if (chain == NULL) return (-1);
evbuffer_chain_insert(buf, chain); return (0);
}
used = 0; /* number of chains we're using space in. */
avail = 0; /* how much space they have. */ /* How many bytes can we stick at the end of buffer as it is? Iterate * over the chains at the end of the buffer, tring to see how much
* space we have in the first n. */ for (chain = *buf->last_with_datap; chain; chain = chain->next) { if (chain->off) {
size_t space = (size_t) CHAIN_SPACE_LEN(chain);
EVUTIL_ASSERT(chain == *buf->last_with_datap); if (space) {
avail += space;
++used;
}
} else { /* No data in chain; realign it. */
chain->misalign = 0;
avail += chain->buffer_len;
++used;
} if (avail >= datlen) { /* There is already enough space. Just return */ return (0);
} if (used == n) break;
}
/* There wasn't enough space in the first n chains with space in * them. Either add a new chain with enough space, or replace all
* empty chains with one that has enough space, depending on n. */ if (used < n) { /* The loop ran off the end of the chains before it hit n
* chains; we can add another. */
EVUTIL_ASSERT(chain == NULL);
buf->last->next = tmp;
buf->last = tmp; /* (we would only set last_with_data if we added the first * chain. But if the buffer had no chains, we would have
* just allocated a new chain earlier) */ return (0);
} else { /* Nuke _all_ the empty chains. */ int rmv_all = 0; /* True iff we removed last_with_data. */
chain = *buf->last_with_datap; if (!chain->off) {
EVUTIL_ASSERT(chain == buf->first);
rmv_all = 1;
avail = 0;
} else { /* can't overflow, since only mutable chains have
* huge misaligns. */
avail = (size_t) CHAIN_SPACE_LEN(chain);
chain = chain->next;
}
/** Helper function to figure out which space to use for reading data into an evbuffer. Internal use only.
@param buf The buffer to read into @param howmuch How much we want to read. @param vecs An array of two or more iovecs or WSABUFs. @param n_vecs_avail The length of vecs @param chainp A pointer to a variable to hold the first chain we're reading into. @param exact Boolean: if true, we do not provide more than 'howmuch' space in the vectors, even if more space is available. @return The number of buffers we're using.
*/ int
evbuffer_read_setup_vecs_(struct evbuffer *buf, ev_ssize_t howmuch, struct evbuffer_iovec *vecs, int n_vecs_avail, struct evbuffer_chain ***chainp, int exact)
{ struct evbuffer_chain *chain; struct evbuffer_chain **firstchainp;
size_t so_far; int i;
ASSERT_EVBUFFER_LOCKED(buf);
if (howmuch < 0) return -1;
so_far = 0; /* Let firstchain be the first chain with any space on it */
firstchainp = buf->last_with_datap;
EVUTIL_ASSERT(*firstchainp); if (CHAIN_SPACE_LEN(*firstchainp) == 0) {
firstchainp = &(*firstchainp)->next;
}
/* Return the number of bytes that can be read from 'fd' without blocking,
 * or -1 on error.  Falls back to EVBUFFER_MAX_READ where FIONREAD is not
 * available. */
static int
get_n_bytes_readable_on_socket(evutil_socket_t fd)
{
#if defined(FIONREAD) && defined(_WIN32)
	unsigned long lng = EVBUFFER_MAX_READ;
	if (ioctlsocket(fd, FIONREAD, &lng) < 0)
		return -1;
	/* Can overflow, but mostly harmlessly. XXXX */
	return (int)lng;
#elif defined(FIONREAD)
	int n = EVBUFFER_MAX_READ;
	if (ioctl(fd, FIONREAD, &n) < 0)
		return -1;
	return n;
#else
	return EVBUFFER_MAX_READ;
#endif
}
/* Read up to 'howmuch' bytes (or a probed/ capped amount if howmuch < 0)
 * from socket 'fd' into the end of 'buf'.  Returns the number of bytes
 * read, 0 on orderly close, or -1 on error. */
/* TODO(niels): should this function return ev_ssize_t and take ev_ssize_t
 * as howmuch? */
int
evbuffer_read(struct evbuffer *buf, evutil_socket_t fd, int howmuch)
{
	struct evbuffer_chain **chainp;
	int n;
	int result;

#ifdef USE_IOVEC_IMPL
	int nvecs, i, remaining;
#else
	struct evbuffer_chain *chain;
	unsigned char *p;
#endif

	EVBUFFER_LOCK(buf);

	if (buf->freeze_end) {
		result = -1;
		goto done;
	}

	n = get_n_bytes_readable_on_socket(fd);
	if (n <= 0 || n > EVBUFFER_MAX_READ)
		n = EVBUFFER_MAX_READ;
	if (howmuch < 0 || howmuch > n)
		howmuch = n;

#ifdef USE_IOVEC_IMPL
	/* Since we can use iovecs, we're willing to use the last
	 * NUM_READ_IOVEC chains. */
	if (evbuffer_expand_fast_(buf, howmuch, NUM_READ_IOVEC) == -1) {
		result = -1;
		goto done;
	} else {
		IOV_TYPE vecs[NUM_READ_IOVEC];
#ifdef EVBUFFER_IOVEC_IS_NATIVE_
		nvecs = evbuffer_read_setup_vecs_(buf, howmuch, vecs,
		    NUM_READ_IOVEC, &chainp, 1);
#else
		/* We aren't using the native struct iovec.  Therefore,
		   we are on win32. */
		struct evbuffer_iovec ev_vecs[NUM_READ_IOVEC];
		nvecs = evbuffer_read_setup_vecs_(buf, howmuch, ev_vecs, 2,
		    &chainp, 1);

		for (i=0; i < nvecs; ++i)
			WSABUF_FROM_EVBUFFER_IOV(&vecs[i], &ev_vecs[i]);
#endif

#ifdef _WIN32
		{
			DWORD bytesRead;
			DWORD flags=0;
			if (WSARecv(fd, vecs, nvecs, &bytesRead, &flags, NULL, NULL)) {
				/* The read failed. It might be a close,
				 * or it might be an error. */
				if (WSAGetLastError() == WSAECONNABORTED)
					n = 0;
				else
					n = -1;
			} else
				n = bytesRead;
		}
#else
		n = readv(fd, vecs, nvecs);
#endif
	}

#else /*!USE_IOVEC_IMPL*/
	/* If we don't have FIONREAD, we might waste some space here */
	/* XXX we _will_ waste some space here if there is any space left
	 * over on buf->last. */
	if ((chain = evbuffer_expand_singlechain(buf, howmuch)) == NULL) {
		result = -1;
		goto done;
	}

	/* We can append new data at this point */
	p = chain->buffer + chain->misalign + chain->off;

#ifndef _WIN32
	n = read(fd, p, howmuch);
#else
	n = recv(fd, p, howmuch, 0);
#endif
#endif /* USE_IOVEC_IMPL */

	if (n == -1) {
		result = -1;
		goto done;
	}
	if (n == 0) {
		result = 0;
		goto done;
	}

#ifdef USE_IOVEC_IMPL
	/* Distribute the bytes we read across the chains we set up. */
	remaining = n;
	for (i=0; i < nvecs; ++i) {
		/* can't overflow, since only mutable chains have
		 * huge misaligns. */
		size_t space = (size_t) CHAIN_SPACE_LEN(*chainp);
		/* XXXX This is a kludge that can waste space in perverse
		 * situations. */
		if (space > EVBUFFER_CHAIN_MAX)
			space = EVBUFFER_CHAIN_MAX;
		if ((ev_ssize_t)space < remaining) {
			(*chainp)->off += space;
			remaining -= (int)space;
		} else {
			(*chainp)->off += remaining;
			buf->last_with_datap = chainp;
			break;
		}
		chainp = &(*chainp)->next;
	}
#else
	chain->off += n;
	advance_last_with_data(buf);
#endif
	buf->total_len += n;
	buf->n_add_for_cb += n;

	/* Tell someone about changes in this buffer */
	evbuffer_invoke_callbacks_(buf);
	result = n;
done:
	EVBUFFER_UNLOCK(buf);
	return result;
}
#ifdef USE_IOVEC_IMPL
/* Write up to 'howmuch' bytes from the front of 'buffer' to 'fd' using a
 * single scatter-gather call (writev / WSASend).  Returns the number of
 * bytes written, 0 if there was nothing to write, or -1 on error. */
static inline int
evbuffer_write_iovec(struct evbuffer *buffer, evutil_socket_t fd,
    ev_ssize_t howmuch)
{
	IOV_TYPE iov[NUM_WRITE_IOVEC];
	struct evbuffer_chain *chain = buffer->first;
	int n, i = 0;

	if (howmuch < 0)
		return -1;

	ASSERT_EVBUFFER_LOCKED(buffer);
	/* XXX make this top out at some maximal data length? if the
	 * buffer has (say) 1MB in it, split over 128 chains, there's
	 * no way it all gets written in one go. */
	while (chain != NULL && i < NUM_WRITE_IOVEC && howmuch) {
#ifdef USE_SENDFILE
		/* we cannot write the file info via writev */
		if (chain->flags & EVBUFFER_SENDFILE)
			break;
#endif
		iov[i].IOV_PTR_FIELD = (void *) (chain->buffer + chain->misalign);
		if ((size_t)howmuch >= chain->off) {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)chain->off;
			howmuch -= chain->off;
		} else {
			/* XXXcould be problematic when windows supports mmap*/
			iov[i++].IOV_LEN_FIELD = (IOV_LEN_TYPE)howmuch;
			break;
		}
		chain = chain->next;
	}
	if (! i)
		return 0;

#ifdef _WIN32
	{
		DWORD bytesSent;
		if (WSASend(fd, iov, i, &bytesSent, 0, NULL, NULL))
			n = -1;
		else
			n = bytesSent;
	}
#else
	n = writev(fd, iov, i);
#endif
	return (n);
}
#endif
/* Subtract <b>howfar</b> from the position of <b>pos</b> within
 * <b>buf</b>. Returns 0 on success, -1 on failure.
 *
 * This isn't exposed yet, because of potential inefficiency issues.
 * Maybe it should be.
 */
static int
evbuffer_ptr_subtract(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t howfar)
{
	if (pos->pos < 0)
		return -1;
	if (howfar > (size_t)pos->pos)
		return -1;
	if (pos->internal_.chain && howfar <= pos->internal_.pos_in_chain) {
		/* Fast path: the new position is still within the same
		 * chain, so no walk is needed. */
		pos->internal_.pos_in_chain -= howfar;
		pos->pos -= howfar;
		return 0;
	} else {
		const size_t newpos = pos->pos - howfar;
		/* Here's the inefficient part: it walks over the
		 * chains until we hit newpos. */
		return evbuffer_ptr_set(buf, pos, newpos, EVBUFFER_PTR_SET);
	}
}
/* Set or advance 'pos' to byte offset 'position' within 'buf'.
 * EVBUFFER_PTR_SET positions from the start of the buffer;
 * EVBUFFER_PTR_ADD advances from the pointer's current position.
 * Returns 0 on success, -1 if the position is out of range (in which
 * case 'pos' is invalidated). */
int
evbuffer_ptr_set(struct evbuffer *buf, struct evbuffer_ptr *pos,
    size_t position, enum evbuffer_ptr_how how)
{
	size_t left = position;
	struct evbuffer_chain *chain = NULL;
	int result = 0;

	EVBUFFER_LOCK(buf);

	switch (how) {
	case EVBUFFER_PTR_SET:
		chain = buf->first;
		pos->pos = position;
		position = 0;
		break;
	case EVBUFFER_PTR_ADD:
		/* this avoids iterating over all previous chains if
		   we just want to advance the position */
		if (pos->pos < 0 || EV_SIZE_MAX - position < (size_t)pos->pos) {
			EVBUFFER_UNLOCK(buf);
			return -1;
		}
		chain = pos->internal_.chain;
		pos->pos += position;
		position = pos->internal_.pos_in_chain;
		break;
	}

	EVUTIL_ASSERT(EV_SIZE_MAX - left >= position);
	while (chain && position + left >= chain->off) {
		left -= chain->off - position;
		chain = chain->next;
		position = 0;
	}
	if (chain) {
		pos->internal_.chain = chain;
		pos->internal_.pos_in_chain = position + left;
	} else if (left == 0) {
		/* The first byte in the (nonexistent) chain after the last chain */
		pos->internal_.chain = NULL;
		pos->internal_.pos_in_chain = 0;
	} else {
		PTR_NOT_FOUND(pos);
		result = -1;
	}

	EVBUFFER_UNLOCK(buf);
	return result;
}
/** Compare the bytes in buf at position pos to the len bytes in mem. Return less than 0, 0, or greater than 0 as memcmp.
*/ staticint
evbuffer_ptr_memcmp(conststruct evbuffer *buf, conststruct evbuffer_ptr *pos, constchar *mem, size_t len)
{ struct evbuffer_chain *chain;
size_t position; int r;
ASSERT_EVBUFFER_LOCKED(buf);
if (pos->pos < 0 ||
EV_SIZE_MAX - len < (size_t)pos->pos ||
pos->pos + len > buf->total_len) return -1;
chain = pos->internal_.chain;
position = pos->internal_.pos_in_chain; while (len && chain) {
size_t n_comparable; if (len + position > chain->off)
n_comparable = chain->off - position; else
n_comparable = len;
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.25 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.