/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
*/
staticvoid check_modules(int force)
{ staticint checked = 0; int i;
if (force || !checked) { for (i = 0; ap_loaded_modules[i]; ++i) {
module *m = ap_loaded_modules[i];
if (!strcmp("event.c", m->name)) {
mpm_module = m; break;
} elseif (!strcmp("motorz.c", m->name)) {
mpm_module = m; break;
} elseif (!strcmp("mpm_netware.c", m->name)) {
mpm_module = m; break;
} elseif (!strcmp("prefork.c", m->name)) {
mpm_module = m; /* While http2 can work really well on prefork, it collides * today's use case for prefork: running single-thread app engines * like php. If we restrict h2_workers to 1 per process, php will * work fine, but browser will be limited to 1 active request at a
* time. */
mpm_supported = 0; break;
} elseif (!strcmp("simple_api.c", m->name)) {
mpm_module = m;
mpm_supported = 0; break;
} elseif (!strcmp("mpm_winnt.c", m->name)) {
mpm_module = m; break;
} elseif (!strcmp("worker.c", m->name)) {
mpm_module = m; break;
}
}
checked = 1;
}
}
if (conn_ctx->beam_in) {
h2_beam_abort(conn_ctx->beam_in, from);
} if (conn_ctx->beam_out) {
h2_beam_abort(conn_ctx->beam_out, from);
}
c2->aborted = 1;
}
typedefstruct {
apr_bucket_brigade *bb; /* c2: data in holding area */ unsigned did_upgrade_eos:1; /* for Upgrade, we added an extra EOS */
} h2_c2_fctx_in_t;
if (!fctx) {
fctx = apr_pcalloc(f->c->pool, sizeof(*fctx));
f->ctx = fctx;
fctx->bb = apr_brigade_create(f->c->pool, f->c->bucket_alloc); if (!conn_ctx->beam_in) {
b = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(fctx->bb, b);
}
}
/* If this is a HTTP Upgrade, it means the request we process * has not Content, although the stream is not necessarily closed. * On first read, we insert an EOS to signal processing that it
* has the complete body. */ if (conn_ctx->is_upgrade && !fctx->did_upgrade_eos) {
b = apr_bucket_eos_create(f->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(fctx->bb, b);
fctx->did_upgrade_eos = 1;
}
while (APR_BRIGADE_EMPTY(fctx->bb)) { /* Get more input data for our request. */ if (APLOGctrace2(f->c)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, f->c, "h2_c2_in(%s-%d): get more data from mplx, block=%d, " "readbytes=%ld",
conn_ctx->id, conn_ctx->stream_id, block, (long)readbytes);
} if (conn_ctx->beam_in) { if (conn_ctx->pipe_in[H2_PIPE_OUT]) {
receive:
status = h2_beam_receive(conn_ctx->beam_in, f->c, fctx->bb, APR_NONBLOCK_READ,
conn_ctx->mplx->stream_max_mem); if (APR_STATUS_IS_EAGAIN(status) && APR_BLOCK_READ == block) {
status = h2_util_wait_on_pipe(conn_ctx->pipe_in[H2_PIPE_OUT]); if (APR_SUCCESS == status) { goto receive;
}
}
} else {
status = h2_beam_receive(conn_ctx->beam_in, f->c, fctx->bb, block,
conn_ctx->mplx->stream_max_mem);
}
} else {
status = APR_EOF;
}
if (APLOGctrace3(f->c)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, f->c, "h2_c2_in(%s-%d): read returned",
conn_ctx->id, conn_ctx->stream_id);
} if (APR_STATUS_IS_EAGAIN(status)
&& (mode == AP_MODE_GETLINE || block == APR_BLOCK_READ)) { /* chunked input handling does not seem to like it if we * return with APR_EAGAIN from a GETLINE read...
* upload 100k test on test-ser.example.org hangs */
status = APR_SUCCESS;
} elseif (APR_STATUS_IS_EOF(status)) { break;
} elseif (status != APR_SUCCESS) {
conn_ctx->last_err = status; return status;
}
/* Nothing there, no more data to get. Return. */ if (status == APR_EOF && APR_BRIGADE_EMPTY(fctx->bb)) { return status;
}
if (APLOGctrace3(f->c)) {
h2_util_bb_log(f->c, conn_ctx->stream_id, APLOG_TRACE3, "c2 input.bb", fctx->bb);
}
if (APR_BRIGADE_EMPTY(fctx->bb)) { if (APLOGctrace3(f->c)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, f->c, "h2_c2_in(%s-%d): no data",
conn_ctx->id, conn_ctx->stream_id);
} return (block == APR_NONBLOCK_READ)? APR_EAGAIN : APR_EOF;
}
if (mode == AP_MODE_EXHAUSTIVE) { /* return all we have */
APR_BRIGADE_CONCAT(bb, fctx->bb);
} elseif (mode == AP_MODE_READBYTES) {
status = h2_brigade_concat_length(bb, fctx->bb, rmax);
} elseif (mode == AP_MODE_SPECULATIVE) {
status = h2_brigade_copy_length(bb, fctx->bb, rmax);
} elseif (mode == AP_MODE_GETLINE) { /* we are reading a single LF line, e.g. the HTTP headers. * this has the nasty side effect to split the bucket, even
* though it ends with CRLF and creates a 0 length bucket */
status = apr_brigade_split_line(bb, fctx->bb, block,
HUGE_STRING_LEN); if (APLOGctrace3(f->c)) { char buffer[1024];
apr_size_t len = sizeof(buffer)-1;
apr_brigade_flatten(bb, buffer, &len);
buffer[len] = 0;
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, f->c, "h2_c2_in(%s-%d): getline: %s",
conn_ctx->id, conn_ctx->stream_id, buffer);
}
} else { /* Hmm, well. There is mode AP_MODE_EATCRLF, but we chose not
* to support it. Seems to work. */
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_ENOTIMPL, f->c,
APLOGNO(03472) "h2_c2_in(%s-%d), unsupported READ mode %d",
conn_ctx->id, conn_ctx->stream_id, mode);
status = APR_ENOTIMPL;
}
if (!c2->master || !(conn_ctx = h2_conn_ctx_get(c2)) || !conn_ctx->stream_id) { return DECLINED;
} /* Now that the request_rec is fully initialized, set relevant params */
conn_ctx->server = r->server;
timeout = h2_config_geti64(r, r->server, H2_CONF_STREAM_TIMEOUT); if (timeout <= 0) {
timeout = r->server->timeout;
}
h2_conn_ctx_set_timeout(conn_ctx, timeout); /* We only handle this one request on the connection and tell everyone * that there is no need to keep it "clean" if something fails. Also, * this prevents mod_reqtimeout from doing funny business with monitoring * keepalive timeouts.
*/
r->connection->keepalive = AP_CONN_CLOSE;
if (conn_ctx->beam_in && !apr_table_get(r->headers_in, "Content-Length")) {
r->body_indeterminate = 1;
}
if (h2_config_sgeti(conn_ctx->server, H2_CONF_COPY_FILES)) {
ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "h2_mplx(%s-%d): copy_files in output",
conn_ctx->id, conn_ctx->stream_id);
h2_beam_set_copy_files(conn_ctx->beam_out, 1);
}
/* Add the raw bytes of the request (e.g. header frame lengths to
* the logio for this request. */ if (conn_ctx->request->raw_bytes && h2_c_logio_add_bytes_in) {
h2_c_logio_add_bytes_in(c2, conn_ctx->request->raw_bytes);
} return OK;
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c2, "h2_c2(%s-%d), adding filters",
conn_ctx->id, conn_ctx->stream_id);
ap_add_input_filter_handle(c2_net_in_filter_handle, NULL, NULL, c2);
ap_add_output_filter_handle(c2_net_out_filter_handle, NULL, NULL, c2); if (c2->keepalives == 0) { /* Simulate that we had already a request on this connection. Some * hooks trigger special behaviour when keepalives is 0. * (Not necessarily in pre_connection, but later. Set it here, so it
* is in place.) */
c2->keepalives = 1; /* We signal that this connection will be closed after the request. * Which is true in that sense that we throw away all traffic data * on this c2 connection after each requests. Although we might * reuse internal structures like memory pools. * The wanted effect of this is that httpd does not try to clean up * any dangling data on this connection when a request is done. Which * is unnecessary on a h2 stream.
*/
c2->keepalive = AP_CONN_CLOSE;
} return OK;
}
void h2_c2_register_hooks(void)
{ /* When the connection processing actually starts, we might * take over, if the connection is for a h2 stream.
*/
ap_hook_pre_connection(c2_hook_pre_connection,
NULL, NULL, APR_HOOK_MIDDLE);
/* We need to manipulate the standard HTTP/1.1 protocol filters and
* install our own. This needs to be done very early. */
ap_hook_pre_read_request(c2_pre_read_request, NULL, NULL, APR_HOOK_MIDDLE);
ap_hook_post_read_request(c2_post_read_request, NULL, NULL,
APR_HOOK_REALLY_FIRST);
ap_hook_fixups(c2_hook_fixups, NULL, NULL, APR_HOOK_LAST); #if H2_USE_POLLFD_FROM_CONN
ap_hook_get_pollfd_from_conn(http2_get_pollfd_from_conn, NULL, NULL,
APR_HOOK_MIDDLE); #endif
APR_REGISTER_OPTIONAL_FN(http2_get_pollfd_from_conn);
/*
 * Run the pre_connection hooks for a freshly created secondary (c2)
 * connection, but only on its first use (keepalives == 0). On repeated
 * use the filters are already installed and nothing needs to run.
 *
 * Returns the result of ap_run_pre_connection() on first use, otherwise
 * APR_SUCCESS.
 */
static apr_status_t c2_run_pre_connection(conn_rec *c2, apr_socket_t *csd)
{
    if (c2->keepalives == 0) {
        /* Simulate that we had already a request on this connection. Some
         * hooks trigger special behaviour when keepalives is 0.
         * (Not necessarily in pre_connection, but later. Set it here, so it
         * is in place.) */
        c2->keepalives = 1;
        /* We signal that this connection will be closed after the request.
         * Which is true in that sense that we throw away all traffic data
         * on this c2 connection after each requests. Although we might
         * reuse internal structures like memory pools.
         * The wanted effect of this is that httpd does not try to clean up
         * any dangling data on this connection when a request is done. Which
         * is unnecessary on a h2 stream.
         */
        c2->keepalive = AP_CONN_CLOSE;
        return ap_run_pre_connection(c2, csd);
    }
    ap_assert(c2->output_filters);
    return APR_SUCCESS;
}
/* See the discussion at <https://github.com/icing/mod_h2/issues/195> * * Each conn_rec->id is supposed to be unique at a point in time. Since * some modules (and maybe external code) uses this id as an identifier * for the request_rec they handle, it needs to be unique for secondary * connections also. * * The MPM module assigns the connection ids and mod_unique_id is using * that one to generate identifier for requests. While the implementation * works for HTTP/1.x, the parallel execution of several requests per * connection will generate duplicate identifiers on load. * * The original implementation for secondary connection identifiers used * to shift the master connection id up and assign the stream id to the * lower bits. This was cramped on 32 bit systems, but on 64bit there was * enough space. * * As issue 195 showed, mod_unique_id only uses the lower 32 bit of the * connection id, even on 64bit systems. Therefore collisions in request ids. * * The way master connection ids are generated, there is some space "at the * top" of the lower 32 bits on allmost all systems. If you have a setup * with 64k threads per child and 255 child processes, you live on the edge. * * The new implementation shifts 8 bits and XORs in the worker * id. This will experience collisions with > 256 h2 workers and heavy * load still. There seems to be no way to solve this in all possible * configurations by mod_h2 alone.
*/
c2->id = (c2->master->id << 8)^worker_id;
/* Add the raw bytes of the request (e.g. header frame lengths to
* the logio for this request. */ if (req->raw_bytes && h2_c_logio_add_bytes_in) {
h2_c_logio_add_bytes_in(c, req->raw_bytes);
}
ap_process_request(r); /* After the call to ap_process_request, the
* request pool may have been deleted. */
r = NULL; if (conn_ctx->beam_out) {
h2_beam_close(conn_ctx->beam_out, c);
}
ap_assert(c1);
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c1, "h2_c2: create for c1(%ld)", c1->id);
/* We create a pool with its own allocator to be used for * processing a request. This is the only way to have the processing * independent of its parent pool in the sense that it can work in * another thread.
*/
apr_pool_create(&pool, parent);
apr_pool_tag(pool, "h2_c2_conn");
c2->master = c1;
c2->pool = pool;
c2->conn_config = ap_create_conn_config(pool);
c2->notes = apr_table_make(pool, 5);
c2->input_filters = NULL;
c2->output_filters = NULL;
c2->keepalives = 0; #if AP_MODULE_MAGIC_AT_LEAST(20180903, 1)
c2->filter_conn_ctx = NULL; #endif
c2->bucket_alloc = apr_bucket_alloc_create(pool); #if !AP_MODULE_MAGIC_AT_LEAST(20180720, 1)
c2->data_in_input_filters = 0;
c2->data_in_output_filters = 0; #endif /* prevent mpm_event from making wrong assumptions about this connection,
* like e.g. using its socket for an async read check. */
c2->clogging_input_filters = 1;
c2->log = NULL;
c2->aborted = 0; /* We cannot install the master connection socket on the secondary, as * modules mess with timeouts/blocking of the socket, with * unwanted side effects to the master connection processing. * Fortunately, since we never use the secondary socket, we can just install * a single, process-wide dummy and everyone is happy.
*/
ap_set_module_config(c2->conn_config, &core_module, dummy_socket); /* TODO: these should be unique to this thread */
c2->sbh = NULL; /*c1->sbh;*/ /* TODO: not all mpm modules have learned about secondary connections yet. * copy their config from master to secondary.
*/ if (mpm_module) {
cfg = ap_get_module_config(c1->conn_config, mpm_module);
ap_set_module_config(c2->conn_config, mpm_module, cfg);
}
/* setup the correct filters to process the request for h2 */
ap_add_input_filter("H2_C2_REQUEST_IN", NULL, r, r->connection);
/* replace the core http filter that formats response headers
* in HTTP/1 with our own that collects status and headers */
ap_remove_output_filter_byhandle(r->output_filters, "HTTP_HEADER");
void h2_c2_register_hooks(void)
{ /* When the connection processing actually starts, we might * take over, if the connection is for a h2 stream.
*/
ap_hook_process_connection(h2_c2_hook_process,
NULL, NULL, APR_HOOK_FIRST); /* We need to manipulate the standard HTTP/1.1 protocol filters and
* install our own. This needs to be done very early. */
ap_hook_post_read_request(h2_c2_hook_post_read_request, NULL, NULL, APR_HOOK_REALLY_FIRST);
ap_hook_fixups(c2_hook_fixups, NULL, NULL, APR_HOOK_LAST); #if H2_USE_POLLFD_FROM_CONN
ap_hook_get_pollfd_from_conn(http2_get_pollfd_from_conn, NULL, NULL,
APR_HOOK_MIDDLE); #endif
APR_REGISTER_OPTIONAL_FN(http2_get_pollfd_from_conn);
/* NOTE(review): the following non-code text (a German website disclaimer,
 * roughly: "The information on this web page was carefully compiled to the
 * best of our knowledge; however, neither completeness, correctness, nor
 * quality of the provided information is guaranteed. Remark: the syntax
 * coloring is still experimental.") leaked into this source file during
 * extraction. It is wrapped in a comment here so the file stays compilable;
 * it should be deleted entirely.
 *
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder
 * Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten
 * Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell.
 */