/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License.
*/
/*
 * The H2_PING connection sub-state: a state independent of the H2_SESSION state
 * of the connection:
 * - H2_PING_ST_NONE: no interference with request handling, ProxyTimeout in effect.
 *   When entered, all suspended streams are unsuspended again.
 * - H2_PING_ST_AWAIT_ANY: new requests are suspended, a possibly configured "ping"
 *   timeout is in effect. Any frame received transits to H2_PING_ST_NONE.
 * - H2_PING_ST_AWAIT_PING: same as above, but only a PING frame transits
 *   to H2_PING_ST_NONE.
 *
 * An AWAIT state is entered on a new connection or when re-using a connection and
 * the last frame received has been some time ago. The latter sends a PING frame
 * and insists on an answer; the former is satisfied by any frame received from the
 * backend.
 *
 * This works for new connections as there is always at least one SETTINGS frame
 * that the backend sends. When re-using a connection, we send a PING and insist on
 * receiving one back, as there might be frames in our connection buffers from
 * some time ago. Since some servers have protections against PING flooding, we
 * only ever have one PING unanswered.
 *
 * Requests are suspended while in a PING state, as we do not want to send data
 * before we can be reasonably sure that the connection is working (at least on
 * the h2 protocol level). This also means that the session can do blocking reads
 * when expecting PING answers.
*/ staticvoid set_ping_timeout(h2_proxy_session *session)
/* NOTE(review): "staticvoid" is a fused token from extraction; should read
 * "static void". The function body is truncated in this chunk — only the
 * opening guard and a local declaration are visible. Presumably it installs
 * session->ping_timeout on the backend socket while preserving the previous
 * timeout in session->save_timeout (the guard checks save_timeout == -1 so
 * the original value is saved only once) — TODO confirm against full file. */
{ if (session->ping_timeout != -1 && session->save_timeout == -1) {
apr_socket_t *socket = NULL;
/*
 * Prepare a re-used backend connection for the next request.
 *
 * If we are not already waiting on a PING answer and the last frame from
 * the backend arrived more than a second ago, enter H2_PING_ST_AWAIT_PING:
 * frames still sitting in our connection buffers say nothing about the
 * backend's current health, so only a fresh PING reply (handled via
 * enter_ping_state()) re-enables request forwarding.
 *
 * @param session the proxy session being re-used
 */
static void ping_reuse_session(h2_proxy_session *session)
{
    if (H2_PING_ST_NONE == session->ping_state) {
        apr_interval_time_t age = apr_time_now() - session->last_frame_received;

        /* Only probe connections that have been idle for a while. */
        if (age > apr_time_from_sec(1)) {
            enter_ping_state(session, H2_PING_ST_AWAIT_PING);
        }
    }
}
/*
 * Record that a frame arrived from the backend and advance the PING
 * sub-state accordingly:
 * - H2_PING_ST_NONE: nothing to do.
 * - H2_PING_ST_AWAIT_ANY: any frame proves the connection is alive.
 * - H2_PING_ST_AWAIT_PING: only a PING answer counts; other frames may
 *   come from our own connection buffers and prove nothing.
 *
 * @param session the proxy session
 * @param frame   the frame just received from the backend
 */
static void ping_ev_frame_received(h2_proxy_session *session,
                                   const nghttp2_frame *frame)
{
    session->last_frame_received = apr_time_now();
    switch (session->ping_state) {
        case H2_PING_ST_NONE:
            /* nop */
            break;
        case H2_PING_ST_AWAIT_ANY:
            enter_ping_state(session, H2_PING_ST_NONE);
            break;
        case H2_PING_ST_AWAIT_PING:
            if (NGHTTP2_PING == frame->hd.type) {
                enter_ping_state(session, H2_PING_ST_NONE);
            }
            /* we may receive many other frames while we are waiting for the
             * PING answer. They may come all from our connection buffers and
             * say nothing about the current state of the backend. */
            break;
    }
}
/* NOTE(review): headerless fragment — the enclosing function's signature is
 * not visible in this chunk. From the use of ngh2/frame/session and the
 * NGHTTP2_ERR_CALLBACK_FAILURE return it looks like the interior of the
 * nghttp2 on_frame_recv callback — TODO confirm against full file. */
ping_ev_frame_received(session, frame); /* Action for frame types: */ switch (frame->hd.type) { case NGHTTP2_HEADERS:
stream = nghttp2_session_get_stream_user_data(ngh2, frame->hd.stream_id); if (!stream) { return NGHTTP2_ERR_CALLBACK_FAILURE;
}
/* Interim (1xx) responses: decide whether to forward them to the client.
 * Forwarding defaults to on when the client side speaks HTTP/2 too. */
r = stream->r; if (r->status >= 100 && r->status < 200) { /* By default, we will forward all interim responses when
* we are sitting on a HTTP/2 connection to the client */ int forward = session->h2_front; switch(r->status) { case 100: if (stream->waiting_on_100) {
stream->waiting_on_100 = 0;
r->status_line = ap_get_status_line(r->status);
forward = 1;
} break; case 103: /* workaround until we get this into http protocol base * parts. without this, unknown codes are converted to
* 500... */
r->status_line = "103 Early Hints"; break; default:
r->status_line = ap_get_status_line(r->status); break;
}
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(03487) "h2_proxy_session(%s): got interim HEADERS, " "status=%d, will forward=%d",
session->id, r->status, forward); if (forward) {
ap_send_interim_response(r, 1);
}
}
/* SETTINGS: pick up the backend's MAX_CONCURRENT_STREAMS limit, if sent. */
stream_resume(stream); break; case NGHTTP2_PING: break; case NGHTTP2_PUSH_PROMISE: break; case NGHTTP2_SETTINGS: if (frame->settings.niv > 0) {
n = nghttp2_session_get_remote_settings(ngh2, NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS); if (n > 0) {
session->remote_max_concurrent = n;
}
} break; case NGHTTP2_GOAWAY: /* we expect the remote server to tell us the highest stream id
* that it has started processing. */
session->last_stream_id = frame->goaway.last_stream_id;
dispatch_event(session, H2_PROXYS_EV_REMOTE_GOAWAY, 0, NULL); break; default: break;
} return 0;
}
/* NOTE(review): headerless fragment — part of a larger response-header
 * post-processing function (the surrounding signature and the code that
 * consumes portstr are not visible in this chunk). "constchar" below is a
 * fused token ("const char") from extraction. */
/* Now, add in the cookies from the response to the ones already saved */
apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
/* and now load 'em all in */ if (!apr_is_empty_table(stream->saves)) {
apr_table_unset(r->headers_out, "Set-Cookie");
r->headers_out = apr_table_overlay(p, r->headers_out, stream->saves);
}
/* Re-establish the request's content type from the response headers. */
if ((buf = apr_table_get(r->headers_out, "Content-Type"))) {
ap_set_content_type(r, apr_pstrdup(p, buf));
}
/* handle Via header in response */ if (session->conf->viaopt != via_off
&& session->conf->viaopt != via_block) { constchar *server_name = ap_get_server_name(stream->r);
apr_port_t port = ap_get_server_port(stream->r); char portstr[32];
/* If USE_CANONICAL_NAME_OFF was configured for the proxy virtual host, * then the server name returned by ap_get_server_name() is the * origin server name (which doesn't make sense with Via: headers) * so we use the proxy vhost's name instead.
*/ if (server_name == stream->r->hostname) {
server_name = stream->r->server->server_hostname;
} if (ap_is_default_port(port, stream->r)) {
portstr[0] = '\0';
} else {
apr_snprintf(portstr, sizeof(portstr), ":%d", port);
}
/* NOTE(review): headerless fragment — from the ngh2/stream_id/data/len usage
 * this looks like the interior of the nghttp2 on_data_chunk_recv callback,
 * appending backend DATA to the client-facing output brigade — TODO confirm.
 * "constchar" below is a fused token ("const char") from extraction. */
stream = nghttp2_session_get_stream_user_data(ngh2, stream_id); if (!stream) {
ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, ap_server_conf, APLOGNO(03358) "h2_proxy_session(%s): recv data chunk for " "unknown stream %d, ignored",
session->id, stream_id); return 0;
}
if (!stream->data_received) { /* last chance to manipulate response headers.
* after this, only trailers */
h2_proxy_stream_end_headers_out(stream);
}
stream->data_received += len;
/* transient bucket: the nghttp2-owned data is only valid during this call,
 * so consumers must copy before this callback returns (flush below). */
b = apr_bucket_transient_create((constchar*)data, len,
stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b); /* always flush after a DATA frame, as we have no other indication
* of buffer use */
b = apr_bucket_flush_create(stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
/* NOTE(review): headerless fragment — from buf/length/data_flags and
 * NGHTTP2_DATA_FLAG_EOF this looks like the interior of an nghttp2 data
 * source read callback, copying request body bytes into the frame buffer —
 * TODO confirm. "elseif"/"constchar" are fused tokens from extraction
 * ("else if" / "const char"). */
if (stream->session->ping_state != H2_PING_ST_NONE) { /* suspend until we hear from the other side */
stream->waiting_on_ping = 1;
status = APR_EAGAIN;
} elseif (stream->r->expecting_100) { /* suspend until the answer comes */
stream->waiting_on_100 = 1;
status = APR_EAGAIN;
} elseif (APR_BRIGADE_EMPTY(stream->input)) {
/* input brigade drained: pull more request body, non-blocking */
status = ap_get_brigade(stream->r->input_filters, stream->input,
AP_MODE_READBYTES, APR_NONBLOCK_READ,
H2MAX(APR_BUCKET_BUFF_SIZE, length));
ap_log_rerror(APLOG_MARK, APLOG_TRACE2, status, stream->r, "h2_proxy_stream(%s-%d): request body read",
stream->session->id, stream->id);
}
if (status == APR_SUCCESS) {
/* copy up to `length` bytes from the input brigade into buf; EOS sets
 * the EOF data flag, partially consumed buckets are split */
size_t readlen = 0; while (status == APR_SUCCESS
&& (readlen < length)
&& !APR_BRIGADE_EMPTY(stream->input)) {
apr_bucket* b = APR_BRIGADE_FIRST(stream->input); if (APR_BUCKET_IS_METADATA(b)) { if (APR_BUCKET_IS_EOS(b)) {
*data_flags |= NGHTTP2_DATA_FLAG_EOF;
} else { /* we do nothing more regarding any meta here */
}
} else { constchar *bdata = NULL;
apr_size_t blen = 0;
status = apr_bucket_read(b, &bdata, &blen, APR_BLOCK_READ);
if (status == APR_SUCCESS && blen > 0) {
size_t copylen = H2MIN(length - readlen, blen);
memcpy(buf, bdata, copylen);
buf += copylen;
readlen += copylen; if (copylen < blen) { /* We have data left in the bucket. Split it. */
status = apr_bucket_split(b, copylen);
}
}
}
apr_bucket_delete(b);
}
/* NOTE(review): headerless fragment — request setup for a proxied stream:
 * determines the :authority value, adds X-Forwarded-* headers, saves
 * existing cookies and builds the nghttp2 header array. The enclosing
 * function's signature is not visible in this chunk — TODO confirm. */
dconf = ap_get_module_config(r->per_dir_config, &proxy_module); if (dconf->preserve_host) {
authority = orig_host; if (!authority) { /* Duplicate mod_proxy behaviour if ProxyPreserveHost is * used but an "HTTP/0.9" request is received without a
* Host: header */
authority = r->server->server_hostname;
ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(10511) "HTTP/0.9 request (with no host line) " "on incoming request and preserve host set " "forcing hostname to be %s for uri %s",
authority, r->uri);
apr_table_setn(r->headers_in, "Host", authority);
}
} else {
authority = puri.hostname; if (!ap_strchr_c(authority, ':') && puri.port
&& apr_uri_port_of_scheme(scheme) != puri.port) { /* port info missing and port is not default for scheme: append */
authority = apr_psprintf(stream->pool, "%s:%d", authority, puri.port);
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->cfront, "authority=%s from uri.hostname=%s and uri.port=%d",
authority, puri.hostname, puri.port);
} /* See #235, we use only :authority when available and remove Host:
* since differing values are not acceptable, see RFC 9113 ch. 8.3.1 */ if (authority && strlen(authority)) {
apr_table_unset(r->headers_in, "Host");
}
/* we need this for mapping relative uris in headers ("Link") back
* to local uris */
stream->real_server_uri = apr_psprintf(stream->pool, "%s://%s", scheme, authority);
stream->p_server_uri = apr_psprintf(stream->pool, "%s://%s", puri.scheme, authority);
path = apr_uri_unparse(stream->pool, &puri, APR_URI_UNP_OMITSITEPART);
if (dconf->add_forwarded_headers) { if (PROXYREQ_REVERSE == r->proxyreq) { /* Add X-Forwarded-For: so that the upstream has a chance to * determine, where the original request came from.
*/
apr_table_mergen(stream->req->headers, "X-Forwarded-For",
r->useragent_ip);
/* Add X-Forwarded-Host: so that upstream knows what the * original request hostname was.
*/ if (orig_host) {
apr_table_mergen(stream->req->headers, "X-Forwarded-Host",
orig_host);
}
/* Add X-Forwarded-Server: so that upstream knows what the * name of this proxy server is (if there are more than one) * XXX: This duplicates Via: - do we strictly need it?
*/
apr_table_mergen(stream->req->headers, "X-Forwarded-Server",
r->server->server_hostname);
}
}
/* Tuck away all already existing cookies */
stream->saves = apr_table_make(r->pool, 2);
apr_table_do(add_header, stream->saves, r->headers_out, "Set-Cookie", NULL);
hd = h2_proxy_util_nghd_make_req(stream->pool, stream->req);
/* If we expect a 100-continue response, we must refrain from reading any input until we get it. Reading the input will possibly trigger
HTTP_IN filter to generate the 100-continue itself. */ if (stream->waiting_on_100 || stream->waiting_on_ping) { /* make a small test if we get an EOF/EOS immediately */
status = ap_get_brigade(stream->r->input_filters, stream->input,
AP_MODE_READBYTES, APR_NONBLOCK_READ,
APR_BUCKET_BUFF_SIZE);
/* body is "maybe present" when the non-blocking probe would block, or
 * when it returned data whose first bucket is not EOS */
may_have_request_body = APR_STATUS_IS_EAGAIN(status)
|| (status == APR_SUCCESS
&& !APR_BUCKET_IS_EOS(APR_BRIGADE_FIRST(stream->input)));
}
/*
 * Handle the "no io" event: neither reading nor writing made progress on
 * the backend connection. Depending on the session state, either shut
 * down, switch to blocking reads, or do a busy wait with a backoff timer.
 *
 * @param session the proxy session
 * @param arg     event argument, forwarded to session_shutdown()
 * @param msg     event message, forwarded to session_shutdown()
 */
static void ev_no_io(h2_proxy_session *session, int arg, const char *msg)
{
    switch (session->state) {
        case H2_PROXYS_ST_BUSY:
        case H2_PROXYS_ST_LOCAL_SHUTDOWN:
        case H2_PROXYS_ST_REMOTE_SHUTDOWN:
            /* nothing for input and output to do. If we remain
             * in this state, we go into a tight loop and suck up
             * CPU cycles. Ideally, we'd like to do a blocking read, but that
             * is not possible if we have scheduled tasks and wait
             * for them to produce something. */
            if (h2_proxy_ihash_empty(session->streams)) {
                if (!is_accepting_streams(session)) {
                    /* We are no longer accepting new streams and have
                     * finished processing existing ones. Time to leave. */
                    session_shutdown(session, arg, msg);
                    transit(session, "no io", H2_PROXYS_ST_DONE);
                }
                else {
                    /* When we have no streams, no task events are possible,
                     * switch to blocking reads */
                    transit(session, "no io", H2_PROXYS_ST_IDLE);
                }
            }
            else {
                /* Unable to do blocking reads, as we wait on events from
                 * task processing in other threads. Do a busy wait with
                 * backoff timer. */
                transit(session, "no io", H2_PROXYS_ST_WAIT);
            }
            break;
        default:
            /* nop */
            break;
    }
}
/*
 * Handle the "stream submitted" event: a new stream was handed to the
 * session. Wake an IDLE or WAITing session into BUSY so the stream gets
 * processed; in any other state there is nothing to do.
 *
 * @param session   the proxy session
 * @param stream_id id of the submitted stream (unused here)
 * @param msg       event message (unused here)
 */
static void ev_stream_submitted(h2_proxy_session *session, int stream_id,
                                const char *msg)
{
    switch (session->state) {
        case H2_PROXYS_ST_IDLE:
        case H2_PROXYS_ST_WAIT:
            transit(session, "stream submitted", H2_PROXYS_ST_BUSY);
            break;
        default:
            /* nop */
            break;
    }
}
/* NOTE(review): headerless fragment — from stream_id/error_code and the
 * EOS/error buckets this looks like the interior of the stream-close
 * handling (nghttp2 on_stream_close path) — TODO confirm. "elseif" below is
 * a fused token ("else if") from extraction; "buckt"/"sesssion" are typos
 * carried in the original comment/log text. */
stream = nghttp2_session_get_stream_user_data(session->ngh2, stream_id); if (stream) { /* if the stream's connection is aborted, do not send anything
* more on it. */
apr_status_t status = (stream->error_code == 0)? APR_SUCCESS : APR_EINVAL; int touched = (stream->data_sent || stream->data_received ||
stream_id <= session->last_stream_id); if (!stream->cfront->aborted) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, stream->cfront, APLOGNO(03364) "h2_proxy_sesssion(%s): stream(%d) closed " "(touched=%d, error=%d)",
session->id, stream_id, touched, stream->error_code);
if (status != APR_SUCCESS) { /* stream failed. If we have received (and forwarded) response * data already, we need to append an error buckt to inform * consumers. * Otherwise, we have an early fail on the connection and may * retry this request on a new one. In that case, keep the
* output virgin so that a new attempt can be made. */ if (stream->data_received) { int http_status = ap_map_http_request_error(status, HTTP_BAD_REQUEST);
b = ap_bucket_error_create(http_status, NULL, stream->r->pool,
stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
b = apr_bucket_eos_create(stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
ap_pass_brigade(stream->r->output_filters, stream->output);
}
} elseif (!stream->data_received) { /* if the response had no body, this is the time to flush
* an empty brigade which will also write the response headers */
h2_proxy_stream_end_headers_out(stream);
stream->data_received = 1;
b = apr_bucket_flush_create(stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
b = apr_bucket_eos_create(stream->cfront->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->output, b);
ap_pass_brigade(stream->r->output_filters, stream->output);
}
}
/* NOTE(review): headerless fragment — interior of a switch over
 * session->state, apparently the session's main processing loop (reads from
 * the backend and dispatches state-machine events) — TODO confirm. "elseif"
 * below is a fused token ("else if") from extraction. */
case H2_PROXYS_ST_WAIT: if (is_waiting_for_backend(session)) { /* we can do a blocking read with the default timeout (as * configured via ProxyTimeout in our socket. There is * nothing we want to send or check until we get more data
* from the backend. */
status = h2_proxy_session_read(session, 1, 0); if (status == APR_SUCCESS) {
have_read = 1;
dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
} else {
dispatch_event(session, H2_PROXYS_EV_CONN_ERROR, status, NULL); return status;
}
} elseif (check_suspended(session) == APR_EAGAIN) { /* no stream has become resumed. Do a blocking read with
* ever increasing timeouts... */ if (session->wait_timeout < 25) {
session->wait_timeout = 25;
} else {
/* exponential backoff, capped at 100ms */
session->wait_timeout = H2MIN(apr_time_from_msec(100),
2*session->wait_timeout);
}
status = h2_proxy_session_read(session, 1, session->wait_timeout);
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, session->c,
APLOGNO(03365) "h2_proxy_session(%s): WAIT read, timeout=%fms",
session->id, session->wait_timeout/1000.0); if (status == APR_SUCCESS) {
have_read = 1;
dispatch_event(session, H2_PROXYS_EV_DATA_READ, 0, NULL);
} elseif (APR_STATUS_IS_TIMEUP(status)
|| APR_STATUS_IS_EAGAIN(status)) { /* go back to checking all inputs again */
transit(session, "wait cycle", H2_PROXYS_ST_BUSY);
}
} break;
case H2_PROXYS_ST_IDLE: break;
case H2_PROXYS_ST_DONE: /* done, session terminated */ return APR_EOF;