do not hash the entire schema on every query plan cache lookup #5374

Merged
merged 4 commits on Jun 7, 2024
Changes from 3 commits
7 changes: 7 additions & 0 deletions .changesets/fix_geal_schema_hash.md
@@ -0,0 +1,7 @@
### do not hash the entire schema on every query plan cache lookup ([PR #5374](https://github.com/apollographql/router/pull/5374))

This fixes performance issues when looking up query plans for large schemas.

⚠️ Because this feature changes the query plan cache key, distributed caches will need to be repopulated.

By [@Geal](https://github.com/Geal) in https://github.com/apollographql/router/pull/5374
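
To make the changeset's claim concrete, here is a minimal sketch of the per-lookup cost before and after the change. The helper names and the SHA-256/hex choices are illustrative assumptions, not the router's actual code; the point is only that the schema contribution to the cache key shrinks from the full SDL text to a short, precomputed digest.

```rust
use sha2::{Digest, Sha256};

// Before: every lookup re-serialized and re-hashed the entire schema text,
// so the cost grew with the size of the supergraph SDL.
fn key_component_before(sdl: &str) -> String {
    let serialized = serde_json::to_vec(sdl).expect("serialization should not fail");
    hex::encode(Sha256::digest(&serialized))
}

// After: the schema hash is computed once when the schema is loaded, and each
// lookup only hashes that short, fixed-length digest.
fn key_component_after(precomputed_schema_id: &str) -> String {
    hex::encode(Sha256::digest(precomputed_schema_id.as_bytes()))
}

fn main() {
    let sdl = "type Query { me: String }"; // stands in for a multi-megabyte supergraph
    let schema_id = hex::encode(Sha256::digest(sdl.as_bytes())); // done once at startup
    println!("{}", key_component_before(sdl));       // work proportional to |SDL| per lookup
    println!("{}", key_component_after(&schema_id)); // work proportional to 64 bytes per lookup
}
```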
12 changes: 6 additions & 6 deletions apollo-router/src/query_planner/caching_query_planner.rs
@@ -188,7 +188,7 @@ where
metadata,
plan_options,
config_mode: _,
sdl: _,
schema_id: _,
introspection: _,
},
_,
@@ -261,7 +261,7 @@ where
query: query.clone(),
operation: operation.clone(),
hash: doc.hash.clone(),
sdl: Arc::clone(&self.schema.raw_sdl),
schema_id: Arc::clone(&self.schema.hash),
@IvanGoncharov (Member) commented on Jun 7, 2024:

nit: if schema_id contains schema.hash maybe it should be called schema_hash.

just thinking out loud 💭

@Geal (Contributor, Author) replied:

I renamed it to schema_id everywhere in fa19613 since we were already naming it that way elsewhere.

metadata,
plan_options,
config_mode: self.config_mode.clone(),
@@ -449,7 +449,7 @@ where
query: request.query.clone(),
operation: request.operation_name.to_owned(),
hash: doc.hash.clone(),
sdl: Arc::clone(&self.schema.raw_sdl),
schema_id: Arc::clone(&self.schema.hash),
metadata,
plan_options,
config_mode: self.config_mode.clone(),
@@ -593,7 +593,7 @@ fn stats_report_key_hash(stats_report_key: &str) -> String {
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct CachingQueryKey {
pub(crate) query: String,
pub(crate) sdl: Arc<String>,
pub(crate) schema_id: Arc<String>,
pub(crate) operation: Option<String>,
pub(crate) hash: Arc<QueryHash>,
pub(crate) metadata: CacheKeyMetadata,
@@ -620,7 +620,7 @@ impl std::fmt::Display for CachingQueryKey {
);
hasher
.update(&serde_json::to_vec(&self.config_mode).expect("serialization should not fail"));
hasher.update(&serde_json::to_vec(&self.sdl).expect("serialization should not fail"));
hasher.update(&*self.schema_id);
hasher.update([self.introspection as u8]);
let metadata = hex::encode(hasher.finalize());

@@ -634,7 +634,7 @@ impl std::fmt::Display for CachingQueryKey {

impl Hash for CachingQueryKey {
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
self.sdl.hash(state);
self.schema_id.hash(state);
self.hash.0.hash(state);
self.operation.hash(state);
self.metadata.hash(state);
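The same precomputed `schema_id` now feeds both forms of the key: the `Display` impl (which produces the string key used for the distributed cache) and the `Hash` impl (used by the in-memory cache). Below is a pared-down, hypothetical version of that pairing, assuming SHA-256 and keeping only the fields visible in this diff; it is not the router's actual `CachingQueryKey`.

```rust
use std::hash::{Hash, Hasher};
use std::sync::Arc;
use sha2::{Digest, Sha256};

// Hypothetical, pared-down key keeping only the fields relevant to this change.
struct QueryKey {
    query: String,
    schema_id: Arc<String>, // precomputed schema hash shared from the Schema
    introspection: bool,
}

// String form of the key (as used for the Redis cache): the schema contribution
// is now the short precomputed digest, not a serialization of the full SDL.
impl std::fmt::Display for QueryKey {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut hasher = Sha256::new();
        hasher.update(self.schema_id.as_bytes());
        hasher.update([self.introspection as u8]);
        write!(f, "plan:{}", hex::encode(hasher.finalize()))
    }
}

// In-memory cache path: hashing a 64-character hex string instead of the SDL.
impl Hash for QueryKey {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.schema_id.hash(state);
        self.query.hash(state);
        self.introspection.hash(state);
    }
}

fn main() {
    let key = QueryKey {
        query: "{ me { name } }".to_string(),
        schema_id: Arc::new("precomputed-schema-hash".to_string()),
        introspection: false,
    };
    println!("{key}");
}
```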
5 changes: 5 additions & 0 deletions apollo-router/src/spec/schema.rs
@@ -28,6 +28,7 @@ pub(crate) struct Schema {
subgraphs: HashMap<String, Uri>,
pub(crate) implementers_map: HashMap<ast::Name, Implementers>,
api_schema: Option<ApiSchema>,
pub(crate) hash: Arc<String>,
}

/// TODO: remove and use apollo_federation::Supergraph unconditionally
@@ -128,12 +129,15 @@ impl Schema {
Supergraph::ApolloCompiler(definitions)
};

let hash = Arc::new(Schema::schema_id(sdl));

Ok(Schema {
raw_sdl: Arc::new(sdl.to_owned()),
supergraph,
subgraphs,
implementers_map,
api_schema: None,
hash,
})
}

@@ -377,6 +381,7 @@ impl std::fmt::Debug for Schema {
subgraphs,
implementers_map,
api_schema: _, // skip
hash: _,
} = self;
f.debug_struct("Schema")
.field("raw_sdl", raw_sdl)
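The diff shows `Schema::schema_id(sdl)` being called once during construction and stored in the new `hash` field, but not the helper's body. A self-contained sketch of the pattern follows; the SHA-256 digest and the exact shape of the helper are assumptions for illustration.

```rust
use std::sync::Arc;
use sha2::{Digest, Sha256};

// Assumed shape of a schema_id-style helper: a stable hex digest of the SDL text.
fn schema_id(sdl: &str) -> String {
    hex::encode(Sha256::digest(sdl.as_bytes()))
}

struct Schema {
    raw_sdl: Arc<String>,
    hash: Arc<String>, // computed once here, cloned cheaply per cache lookup
}

impl Schema {
    fn parse(sdl: &str) -> Self {
        let hash = Arc::new(schema_id(sdl));
        Self { raw_sdl: Arc::new(sdl.to_owned()), hash }
    }
}

fn main() {
    let schema = Schema::parse("type Query { me: String }");
    // Each query-plan cache key just clones the Arc (a pointer bump), never the SDL.
    let per_lookup = Arc::clone(&schema.hash);
    assert_eq!(per_lookup.len(), 64); // hex-encoded SHA-256
    let _ = &schema.raw_sdl;
}
```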
16 changes: 8 additions & 8 deletions apollo-router/tests/integration/redis.rs
@@ -23,13 +23,13 @@ use crate::integration::common::graph_os_enabled;
use crate::integration::IntegrationTest;

#[tokio::test(flavor = "multi_thread")]
async fn query_planner() -> Result<(), BoxError> {
async fn query_planner_cache() -> Result<(), BoxError> {
// If this test fails and the cache key format has changed, you'll need to update the key here.
// 1. Force this test to run locally by removing the cfg() line at the top of this file.
// 2. run `docker compose up -d` and connect to the redis container by running `docker-compose exec redis /bin/bash`.
// 3. Run the `redis-cli` command from the shell and start the redis `monitor` command.
// 4. Run this test and yank the updated cache key from the redis logs.
let known_cache_key = "plan:0:v2.8.0:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:9c26cb1f820a78848ba3d5d3295c16aa971368c5295422fd33cc19d4a6006a9c";
let known_cache_key = "plan:0:v2.8.0:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3106dfc3339d8c3f3020434024bff0f566a8be5995199954db5a7525a7d7e67a";

let config = RedisConfig::from_url("redis://127.0.0.1:6379").unwrap();
let client = RedisClient::new(config, None, None, None);
@@ -903,7 +903,7 @@ async fn connection_failure_blocks_startup() {
async fn query_planner_redis_update_query_fragments() {
test_redis_query_plan_config_update(
include_str!("fixtures/query_planner_redis_config_update_query_fragments.router.yaml"),
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:ae8b525534cb7446a34715fc80edd41d4d29aa65c5f39f9237d4ed8459e3fe82",
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:9054d19854e1d9e282ac7645c612bc70b8a7143d43b73d44dade4a5ec43938b4",
)
.await;
}
@@ -922,7 +922,7 @@ async fn query_planner_redis_update_planner_mode() {
async fn query_planner_redis_update_introspection() {
test_redis_query_plan_config_update(
include_str!("fixtures/query_planner_redis_config_update_introspection.router.yaml"),
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:1910d63916aae7a1066cb8c7d622fc3a8e363ed1b6ac8e214deed4046abae85c",
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:04b3051125b5994fba6b0a22b2d8b4246cadc145be030c491a3431655d2ba07a",
)
.await;
}
@@ -931,7 +931,7 @@ async fn query_planner_redis_update_introspection() {
async fn query_planner_redis_update_defer() {
test_redis_query_plan_config_update(
include_str!("fixtures/query_planner_redis_config_update_defer.router.yaml"),
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:8a17c5b196af5e3a18d24596424e9849d198f456dd48297b852a5f2ca847169b",
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3b7241b0db2cd878b79c0810121953ba544543f3cb2692aaf1a59184470747b0",
)
.await;
}
@@ -942,7 +942,7 @@ async fn query_planner_redis_update_type_conditional_fetching() {
include_str!(
"fixtures/query_planner_redis_config_update_type_conditional_fetching.router.yaml"
),
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:275f78612ed3d45cdf6bf328ef83e368b5a44393bd8c944d4a7d694aed61f017",
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:0ca695a8c4c448b65fa04229c663f44150af53b184ebdcbb0ad6862290efed76",
)
.await;
}
@@ -953,7 +953,7 @@ async fn query_planner_redis_update_reuse_query_fragments() {
include_str!(
"fixtures/query_planner_redis_config_update_reuse_query_fragments.router.yaml"
),
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:15fbb62c94e8da6ea78f28a6eb86a615dcaf27ff6fd0748fac4eb614b0b17662",
"plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:f7c04319556397ec4b550aa5aaa96c73689cee09026b661b6a9fc20b49e6fa77",
)
.await;
}
@@ -976,7 +976,7 @@ async fn test_redis_query_plan_config_update(updated_config: &str, new_cache_key
router.assert_started().await;
router.clear_redis_cache().await;

let starting_key = "plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:1910d63916aae7a1066cb8c7d622fc3a8e363ed1b6ac8e214deed4046abae85c";
let starting_key = "plan:0:v2.8.0:a9e605fa09adc5a4b824e690b4de6f160d47d84ede5956b58a7d300cca1f7204:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:4a5827854a6d2efc85045f0d5bede402e15958390f1073d2e77df56188338e5a";
router.execute_default_query().await;
router.assert_redis_cache_contains(starting_key, None).await;
router.update_config(updated_config).await;
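
The updated test constants above illustrate why the changeset warns that distributed caches must be repopulated: only the trailing component of each expected Redis key changed, so no previously written entry can match a new key. A quick check over the two constants from `query_planner_cache` (copied verbatim from the test):

```rust
fn main() {
    let before = "plan:0:v2.8.0:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:9c26cb1f820a78848ba3d5d3295c16aa971368c5295422fd33cc19d4a6006a9c";
    let after = "plan:0:v2.8.0:16385ebef77959fcdc520ad507eb1f7f7df28f1d54a0569e3adabcb4cd00d7ce:3973e022e93220f9212c18d0d0c543ae7c309e46640da93a4a0314de999f5112:3106dfc3339d8c3f3020434024bff0f566a8be5995199954db5a7525a7d7e67a";
    let b: Vec<&str> = before.split(':').collect();
    let a: Vec<&str> = after.split(':').collect();
    // The leading segments are unchanged (their exact meanings are not shown in this diff).
    assert_eq!(b[..b.len() - 1], a[..a.len() - 1]);
    // Only the final metadata hash differs, so old cache entries can never be hit again.
    assert_ne!(b.last(), a.last());
}
```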