Configuration Options for HTTP/1 Max Headers and Buffer Limits #6194

Merged: 34 commits merged into dev from i1g/max_headers on Nov 19, 2024
Changes shown from 6 of the 34 commits (this early diff still uses the experimental_ prefix that later commits drop)
Commits (34):
236d560  update hyper to 0.14.31 (IvanGoncharov, Oct 24, 2024)
b66c3f2  Adds `supergraph.experimental_http1_max_headers` option (IvanGoncharov, Oct 24, 2024)
03760c7  fix lint (IvanGoncharov, Oct 24, 2024)
2d3e901  Merge branch 'dev' into i1g/max_headers (IvanGoncharov, Nov 5, 2024)
1969f11  Add experimental_http1_max_buf_size (IvanGoncharov, Nov 7, 2024)
987e0bb  fix format (IvanGoncharov, Nov 7, 2024)
22b8239  Use tagged release on our organization (abernix, Nov 8, 2024)
e39447b  Merge branch 'dev' into i1g/max_headers (abernix, Nov 8, 2024)
e20fd0b  Merge branch 'dev' into i1g/max_headers (IvanGoncharov, Nov 13, 2024)
1de4a55  remove duplication in Cargo.toml (IvanGoncharov, Nov 13, 2024)
199fca0  fix lint (Nov 13, 2024)
9d0115a  fix compile error (Nov 13, 2024)
9344ef0  Update snapshot (Nov 13, 2024)
c0bac69  fix tests (Nov 13, 2024)
1491ac0  human-friendly config (Nov 13, 2024)
67bdb1f  Add docs (Nov 14, 2024)
941aa16  Add crate feature (Nov 14, 2024)
f286960  review changes (Nov 14, 2024)
1d0bf1f  add feature flag to xtask commands (Nov 14, 2024)
6837adf  Apply suggestions from code review (IvanGoncharov, Nov 14, 2024)
e5c8bd0  Merge branch 'dev' into i1g/max_headers (IvanGoncharov, Nov 14, 2024)
450b7e7  pass experimental_hyper_header_limits feature to xtask commands (IvanGoncharov, Nov 18, 2024)
274f062  fix xtask build (IvanGoncharov, Nov 18, 2024)
2d2c2bf  Merge branch 'dev' into i1g/max_headers (IvanGoncharov, Nov 18, 2024)
725f510  Update snapshot (IvanGoncharov, Nov 18, 2024)
c445493  Add missing changeset (IvanGoncharov, Nov 18, 2024)
ac1b5ac  Fix tests failing without feature flag (IvanGoncharov, Nov 18, 2024)
c31ce67  Switch kb => kib in tests and docs (IvanGoncharov, Nov 18, 2024)
e44b154  Revert unwanted changes in Cargo.lock (IvanGoncharov, Nov 18, 2024)
bea7a96  Merge branch 'dev' into i1g/max_headers (IvanGoncharov, Nov 18, 2024)
27ef7ab  Address review on changelog (IvanGoncharov, Nov 18, 2024)
0d2709b  Drop experimental prefix (IvanGoncharov, Nov 18, 2024)
cdf4e19  experimental_hyper_header_limits => hyper_header_limits (IvanGoncharov, Nov 18, 2024)
bd686e5  fix CI after removing experimental_ (IvanGoncharov, Nov 18, 2024)
1,018 changes: 479 additions & 539 deletions Cargo.lock

Large diffs are not rendered by default.

3 changes: 3 additions & 0 deletions Cargo.toml
@@ -76,3 +76,6 @@ sha1 = "0.10.6"
tempfile = "3.10.1"
tokio = { version = "1.36.0", features = ["full"] }
tower = { version = "0.4.13", features = ["full"] }

[patch.crates-io]
"hyper" = { git = "https://github.com/IvanGoncharov/hyper.git", branch = "backpor_header_size" }
2 changes: 1 addition & 1 deletion apollo-router/Cargo.toml
@@ -105,7 +105,7 @@ http-body = "0.4.6"
heck = "0.5.0"
humantime = "2.1.0"
humantime-serde = "1.1.1"
hyper = { version = "0.14.28", features = ["server", "client", "stream"] }
hyper = { version = "0.14.31", features = ["server", "client", "stream"] }
hyper-rustls = { version = "0.24.2", features = ["http1", "http2"] }
indexmap = { version = "2.2.6", features = ["serde"] }
itertools = "0.13.0"
13 changes: 13 additions & 0 deletions apollo-router/src/axum_factory/axum_http_server_factory.rs
@@ -5,6 +5,7 @@ use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use std::time::Instant;

use axum::error_handling::HandleErrorLayer;
@@ -24,6 +25,7 @@ use http::header::CONTENT_ENCODING;
use http::HeaderValue;
use http::Request;
use http_body::combinators::UnsyncBoxBody;
use hyper::server::conn::Http;
use hyper::Body;
use itertools::Itertools;
use multimap::MultiMap;
@@ -298,12 +300,22 @@ impl HttpServerFactory for AxumHttpServerFactory {
let actual_main_listen_address = main_listener
.local_addr()
.map_err(ApolloRouterError::ServerCreationError)?;
let mut http_config = Http::new();
http_config.http1_keep_alive(true);
http_config.http1_header_read_timeout(Duration::from_secs(10));
if let Some(max_headers) = configuration.supergraph.experimental_http1_max_headers {
http_config.http1_max_headers(max_headers);
}
if let Some(max_buf_size) = configuration.supergraph.experimental_http1_max_buf_size {
http_config.max_buf_size(max_buf_size);
}

let (main_server, main_shutdown_sender) = serve_router_on_listen_addr(
main_listener,
actual_main_listen_address.clone(),
all_routers.main.1,
true,
http_config.clone(),
all_connections_stopped_sender.clone(),
);

@@ -343,6 +355,7 @@ impl HttpServerFactory for AxumHttpServerFactory {
listen_addr.clone(),
router,
false,
http_config.clone(),
all_connections_stopped_sender.clone(),
);
(
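For context on the hunks above: hyper 0.14's connection builder is created once, the optional limits from configuration are applied to it, and the resulting http_config is cloned for the main listener and every extra listener. The standalone sketch below shows that pattern outside the router; the address, hello service, and literal limit values are illustrative assumptions rather than router code, and it presumes hyper 0.14.31 (which provides Http::http1_max_headers) plus tokio.

// Standalone sketch, not router code: one shared `Http` builder carries the
// HTTP/1 limits and is cloned per accepted connection.
use std::convert::Infallible;
use std::time::Duration;

use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpListener;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut http_config = Http::new();
    http_config.http1_keep_alive(true);
    http_config.http1_header_read_timeout(Duration::from_secs(10));
    // In the router these two values come from `configuration.supergraph`;
    // the literals here are only examples.
    http_config.http1_max_headers(200);
    http_config.max_buf_size(2 * 1024 * 1024);

    let listener = TcpListener::bind("127.0.0.1:4000").await?;
    loop {
        let (stream, _peer) = listener.accept().await?;
        let http = http_config.clone();
        tokio::spawn(async move {
            let service = service_fn(|_req: Request<Body>| async {
                Ok::<_, Infallible>(Response::new(Body::from("ok")))
            });
            // hyper rejects requests that exceed the header-count or buffer
            // limits before they ever reach the service.
            if let Err(err) = http.serve_connection(stream, service).await {
                eprintln!("connection error: {err}");
            }
        });
    }
}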
15 changes: 5 additions & 10 deletions apollo-router/src/axum_factory/listeners.rs
@@ -202,6 +202,7 @@ pub(super) fn serve_router_on_listen_addr(
address: ListenAddr,
router: axum::Router,
main_graphql_port: bool,
http_config: Http,
all_connections_stopped_sender: mpsc::Sender<()>,
) -> (impl Future<Output = Listener>, oneshot::Sender<()>) {
let (shutdown_sender, shutdown_receiver) = oneshot::channel::<()>();
@@ -243,6 +244,7 @@
}

let address = address.clone();
let mut http_config = http_config.clone();
tokio::task::spawn(async move {
// this sender must be moved into the session to track that it is still running
let _connection_stop_signal = connection_stop_signal;
@@ -261,11 +263,8 @@
.expect(
"this should not fail unless the socket is invalid",
);
let connection = Http::new()
.http1_keep_alive(true)
.http1_header_read_timeout(Duration::from_secs(10))
.serve_connection(stream, app);

let connection = http_config.serve_connection(stream, app);
tokio::pin!(connection);
tokio::select! {
// the connection finished first
@@ -291,9 +290,7 @@
NetworkStream::Unix(stream) => {
let received_first_request = Arc::new(AtomicBool::new(false));
let app = IdleConnectionChecker::new(received_first_request.clone(), app);
let connection = Http::new()
.http1_keep_alive(true)
.serve_connection(stream, app);
let connection = http_config.serve_connection(stream, app);

tokio::pin!(connection);
tokio::select! {
@@ -329,9 +326,7 @@
let protocol = stream.get_ref().1.alpn_protocol();
let http2 = protocol == Some(&b"h2"[..]);

let connection = Http::new()
.http1_keep_alive(true)
.http1_header_read_timeout(Duration::from_secs(10))
let connection = http_config
.http2_only(http2)
.serve_connection(stream, app);

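The hunks in this file thread the shared builder through the existing accept loop: each spawned task clones http_config (the builder's setters take &mut self), serves the stream with it, and keeps the established graceful-shutdown handling. A rough self-contained sketch of that select pattern follows; the serve_one helper, hello service, and oneshot wiring are illustrative assumptions, not the router's actual types.

// Sketch of the per-connection select/graceful-shutdown pattern used above.
use std::convert::Infallible;

use hyper::server::conn::Http;
use hyper::service::service_fn;
use hyper::{Body, Request, Response};
use tokio::net::TcpStream;
use tokio::sync::oneshot;

async fn serve_one(http_config: Http, stream: TcpStream, mut shutdown: oneshot::Receiver<()>) {
    let service = service_fn(|_req: Request<Body>| async {
        Ok::<_, Infallible>(Response::new(Body::from("ok")))
    });
    let connection = http_config.serve_connection(stream, service);
    tokio::pin!(connection);
    tokio::select! {
        // The connection finished on its own (client closed, error, or keep-alive ended).
        result = &mut connection => {
            if let Err(err) = result {
                eprintln!("connection error: {err}");
            }
        }
        // Shutdown was requested: stop taking new requests and drain in-flight ones.
        _ = &mut shutdown => {
            connection.as_mut().graceful_shutdown();
            if let Err(err) = connection.await {
                eprintln!("error while draining connection: {err}");
            }
        }
    }
}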
21 changes: 21 additions & 0 deletions apollo-router/src/configuration/mod.rs
@@ -699,6 +699,19 @@ pub(crate) struct Supergraph {
/// Log a message if the client closes the connection before the response is sent.
/// Default: false.
pub(crate) experimental_log_on_broken_pipe: bool,

/// Set the maximum number of headers for HTTP1 protocol
///
/// If the server receives more headers than this limit, it responds to the client with
/// "431 Request Header Fields Too Large".
///
/// Default is 100.
pub(crate) experimental_http1_max_headers: Option<usize>,

/// Set the maximum buffer size for the HTTP1 connection.
///
/// Default is ~400 KiB.
pub(crate) experimental_http1_max_buf_size: Option<usize>,
}

#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize, JsonSchema)]
@@ -737,6 +750,8 @@ impl Supergraph {
generate_query_fragments: Option<bool>,
early_cancel: Option<bool>,
experimental_log_on_broken_pipe: Option<bool>,
experimental_http1_max_headers: Option<usize>,
experimental_http1_max_buf_size: Option<usize>,
) -> Self {
Self {
listen: listen.unwrap_or_else(default_graphql_listen),
@@ -756,6 +771,8 @@
generate_query_fragments: generate_query_fragments.unwrap_or_default(),
early_cancel: early_cancel.unwrap_or_default(),
experimental_log_on_broken_pipe: experimental_log_on_broken_pipe.unwrap_or_default(),
experimental_http1_max_headers,
experimental_http1_max_buf_size,
}
}
}
@@ -774,6 +791,8 @@
generate_query_fragments: Option<bool>,
early_cancel: Option<bool>,
experimental_log_on_broken_pipe: Option<bool>,
experimental_http1_max_headers: Option<usize>,
experimental_http1_max_buf_size: Option<usize>,
) -> Self {
Self {
listen: listen.unwrap_or_else(test_listen),
@@ -793,6 +812,8 @@
generate_query_fragments: generate_query_fragments.unwrap_or_default(),
early_cancel: early_cancel.unwrap_or_default(),
experimental_log_on_broken_pipe: experimental_log_on_broken_pipe.unwrap_or_default(),
experimental_http1_max_headers,
experimental_http1_max_buf_size,
}
}
}
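The two new Option<usize> fields above default to None when the keys are absent, so existing configurations keep hyper's defaults (100 headers, roughly 400 KiB of buffer). A small hedged sketch of that behaviour follows; the SupergraphLimits struct and the use of serde_yaml here are illustrative stand-ins, not the router's own types.

// Illustrative only: absent keys deserialize to `None`, which the router
// treats as "leave hyper's default in place".
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct SupergraphLimits {
    experimental_http1_max_headers: Option<usize>,
    experimental_http1_max_buf_size: Option<usize>,
}

fn main() {
    let yaml = "experimental_http1_max_headers: 200\n";
    let limits: SupergraphLimits = serde_yaml::from_str(yaml).expect("valid YAML");
    assert_eq!(limits.experimental_http1_max_headers, Some(200));
    assert_eq!(limits.experimental_http1_max_buf_size, None);
}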
@@ -6592,6 +6592,14 @@ expression: "&schema"
"description": "abort request handling when the client drops the connection. Default: false. When set to true, some parts of the request pipeline like telemetry will not work properly, but request handling will stop immediately when the client connection is closed.",
"type": "boolean"
},
"experimental_http1_max_headers": {
"default": null,
"description": "Set the maximum number of headers for HTTP1 protocol\n\nIf server receives more headers than the buffer size, it responds to the client with \"431 Request Header Fields Too Large\".\n\nDefault is 100.",
"format": "uint",
"minimum": 0.0,
"nullable": true,
"type": "integer"
},
"experimental_log_on_broken_pipe": {
"default": false,
"description": "Log a message if the client closes the connection before the response is sent. Default: false.",
1 change: 1 addition & 0 deletions apollo-router/tests/integration/mod.rs
@@ -12,6 +12,7 @@ mod operation_limits;
mod operation_name;
mod query_planner;
mod subgraph_response;
mod supergraph;
mod traffic_shaping;
mod typename;

119 changes: 119 additions & 0 deletions apollo-router/tests/integration/supergraph.rs
@@ -0,0 +1,119 @@
use std::collections::HashMap;

use serde_json::json;
use tower::BoxError;

use crate::integration::IntegrationTest;

#[tokio::test(flavor = "multi_thread")]
async fn test_supergraph_errors_on_http1_max_headers() -> Result<(), BoxError> {
let mut router = IntegrationTest::builder()
.config(
r#"
supergraph:
experimental_http1_max_headers: 100
"#,
)
.build()
.await;

router.start().await;
router.assert_started().await;

let mut headers = HashMap::new();
for i in 0..100 {
headers.insert(format!("test-header-{i}"), format!("value_{i}"));
}

let (_trace_id, response) = router
.execute_query_with_headers(&json!({ "query": "{ __typename }"}), headers)
.await;
assert_eq!(response.status(), 431);
Ok(())
}

#[tokio::test(flavor = "multi_thread")]
async fn test_supergraph_allow_to_change_http1_max_headers() -> Result<(), BoxError> {
let mut router = IntegrationTest::builder()
.config(
r#"
supergraph:
experimental_http1_max_headers: 200
"#,
)
.build()
.await;

router.start().await;
router.assert_started().await;

let mut headers = HashMap::new();
for i in 0..100 {
headers.insert(format!("test-header-{i}"), format!("value_{i}"));
}

let (_trace_id, response) = router
.execute_query_with_headers(&json!({ "query": "{ __typename }"}), headers)
.await;
assert_eq!(response.status(), 200);
assert_eq!(
response.json::<serde_json::Value>().await?,
json!({ "data": { "__typename": "Query" } })
);
Ok(())
}

#[tokio::test(flavor = "multi_thread")]
async fn test_supergraph_allow_to_change_http1_max_buf_size() -> Result<(), BoxError> {
let mut router = IntegrationTest::builder()
.config(
r#"
supergraph:
experimental_http1_max_buf_size: 1000000
"#,
)
.build()
.await;

router.start().await;
router.assert_started().await;

let mut headers = HashMap::new();
headers.insert(format!("test-header"), "x".repeat(1048576 + 1));

let (_trace_id, response) = router
.execute_query_with_headers(&json!({ "query": "{ __typename }"}), headers)
.await;
assert_eq!(response.status(), 431);
Ok(())
}

#[tokio::test(flavor = "multi_thread")]
async fn test_supergraph_allows_http1_header_that_fits_inside_larger_buffer(
) -> Result<(), BoxError> {
let mut router = IntegrationTest::builder()
.config(
r#"
supergraph:
experimental_http1_max_buf_size: 2000000
"#,
)
.build()
.await;

router.start().await;
router.assert_started().await;

let mut headers = HashMap::new();
headers.insert(format!("test-header"), "x".repeat(1048576 + 1));

let (_trace_id, response) = router
.execute_query_with_headers(&json!({ "query": "{ __typename }"}), headers)
.await;
assert_eq!(response.status(), 200);
assert_eq!(
response.json::<serde_json::Value>().await?,
json!({ "data": { "__typename": "Query" } })
);
Ok(())
}
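Beyond the test harness above, the header-count limit can also be probed with a hand-rolled HTTP/1.1 request. A hedged standalone sketch follows; the port, header count, and single read are assumptions, and it expects a router already running with a limit below 300 headers.

// Standalone probe, not part of the PR: send more headers than the configured
// limit and print the status line, which should read
// "HTTP/1.1 431 Request Header Fields Too Large".
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut stream = TcpStream::connect("127.0.0.1:4000").await?;

    let mut request = String::from("GET / HTTP/1.1\r\nHost: localhost\r\n");
    for i in 0..300 {
        request.push_str(&format!("x-test-header-{i}: value_{i}\r\n"));
    }
    request.push_str("Connection: close\r\n\r\n");
    stream.write_all(request.as_bytes()).await?;

    // A single read is enough to capture the status line for this sketch.
    let mut buf = vec![0u8; 4096];
    let n = stream.read(&mut buf).await?;
    let response = String::from_utf8_lossy(&buf[..n]);
    println!("{}", response.lines().next().unwrap_or_default());
    Ok(())
}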