diff --git a/apollo-router/src/axum_factory/tests.rs b/apollo-router/src/axum_factory/tests.rs
index 9fe6f489cb..fa00789f61 100644
--- a/apollo-router/src/axum_factory/tests.rs
+++ b/apollo-router/src/axum_factory/tests.rs
@@ -2273,7 +2273,14 @@ async fn test_supergraph_timeout() {
     // we do the entire supergraph rebuilding instead of using `from_supergraph_mock_callback_and_configuration`
     // because we need the plugins to apply on the supergraph
-    let mut plugins = create_plugins(&conf, &schema, planner.subgraph_schemas(), None, None)
+    let subgraph_schemas = Arc::new(
+        planner
+            .subgraph_schemas()
+            .iter()
+            .map(|(k, v)| (k.clone(), v.schema.clone()))
+            .collect(),
+    );
+    let mut plugins = create_plugins(&conf, &schema, subgraph_schemas, None, None)
         .await
         .unwrap();
diff --git a/apollo-router/src/plugin/mod.rs b/apollo-router/src/plugin/mod.rs
index 9479e7f91a..f8750892c9 100644
--- a/apollo-router/src/plugin/mod.rs
+++ b/apollo-router/src/plugin/mod.rs
@@ -45,7 +45,6 @@ use tower::ServiceBuilder;
 use crate::graphql;
 use crate::layers::ServiceBuilderExt;
 use crate::notification::Notify;
-use crate::query_planner::fetch::SubgraphSchemas;
 use crate::router_factory::Endpoint;
 use crate::services::execution;
 use crate::services::router;
@@ -75,7 +74,7 @@ pub struct PluginInit {
     pub(crate) supergraph_schema: Arc>,

     /// The parsed subgraph schemas from the query planner, keyed by subgraph name
-    pub(crate) subgraph_schemas: Arc,
+    pub(crate) subgraph_schemas: Arc>>>,

     /// Launch ID
     pub(crate) launch_id: Option>,
@@ -176,7 +175,7 @@ where
         supergraph_sdl: Arc,
         supergraph_schema_id: Arc,
         supergraph_schema: Arc>,
-        subgraph_schemas: Option>,
+        subgraph_schemas: Option>>>>,
         launch_id: Option>>,
         notify: Notify,
     ) -> Self {
@@ -201,7 +200,7 @@ where
         supergraph_sdl: Arc,
         supergraph_schema_id: Arc,
         supergraph_schema: Arc>,
-        subgraph_schemas: Option>,
+        subgraph_schemas: Option>>>>,
         launch_id: Option>,
         notify: Notify,
     ) -> Result {
@@ -224,7 +223,7 @@ where
         supergraph_sdl: Option>,
         supergraph_schema_id: Option>,
         supergraph_schema: Option>>,
-        subgraph_schemas: Option>,
+        subgraph_schemas: Option>>>>,
         launch_id: Option>,
         notify: Option>,
     ) -> Self {
diff --git a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
index 7881b5abf8..686ade2eb6 100644
--- a/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
+++ b/apollo-router/src/plugins/demand_control/cost_calculator/static_cost.rs
@@ -742,7 +742,7 @@ mod tests {
         let mut demand_controlled_subgraph_schemas = HashMap::new();
         for (subgraph_name, subgraph_schema) in planner.subgraph_schemas().iter() {
             let demand_controlled_subgraph_schema =
-                DemandControlledSchema::new(subgraph_schema.clone()).unwrap();
+                DemandControlledSchema::new(subgraph_schema.schema.clone()).unwrap();
             demand_controlled_subgraph_schemas
                 .insert(subgraph_name.to_string(), demand_controlled_subgraph_schema);
         }
diff --git a/apollo-router/src/plugins/include_subgraph_errors.rs b/apollo-router/src/plugins/include_subgraph_errors.rs
index 11fd59c2d9..2779758cbf 100644
--- a/apollo-router/src/plugins/include_subgraph_errors.rs
+++ b/apollo-router/src/plugins/include_subgraph_errors.rs
@@ -214,7 +214,13 @@ mod test {
             .await
             .unwrap();
         let schema = planner.schema();
-        let subgraph_schemas = planner.subgraph_schemas();
+        let subgraph_schemas = Arc::new(
+            planner
+                .subgraph_schemas()
+                .iter()
+                .map(|(k, v)| (k.clone(), v.schema.clone()))
+                .collect(),
+        );
         let builder = PluggableSupergraphServiceBuilder::new(planner);
diff --git a/apollo-router/src/plugins/test.rs b/apollo-router/src/plugins/test.rs
index 6120608f61..8b68888049 100644
--- a/apollo-router/src/plugins/test.rs
+++ b/apollo-router/src/plugins/test.rs
@@ -110,7 +110,12 @@ impl> + 'static> PluginTestHarness {
             .supergraph_schema_id(crate::spec::Schema::schema_id(&supergraph_sdl).into())
             .supergraph_sdl(supergraph_sdl)
             .supergraph_schema(Arc::new(parsed_schema))
-            .subgraph_schemas(subgraph_schemas)
+            .subgraph_schemas(Arc::new(
+                subgraph_schemas
+                    .iter()
+                    .map(|(k, v)| (k.clone(), v.schema.clone()))
+                    .collect(),
+            ))
             .notify(Notify::default())
             .build();
diff --git a/apollo-router/src/plugins/traffic_shaping/mod.rs b/apollo-router/src/plugins/traffic_shaping/mod.rs
index cf6e0df18d..c0dffdcb6f 100644
--- a/apollo-router/src/plugins/traffic_shaping/mod.rs
+++ b/apollo-router/src/plugins/traffic_shaping/mod.rs
@@ -531,7 +531,13 @@ mod test {
         let planner = QueryPlannerService::new(schema.clone(), config.clone())
             .await
             .unwrap();
-        let subgraph_schemas = planner.subgraph_schemas();
+        let subgraph_schemas = Arc::new(
+            planner
+                .subgraph_schemas()
+                .iter()
+                .map(|(k, v)| (k.clone(), v.schema.clone()))
+                .collect(),
+        );
         let mut builder =
             PluggableSupergraphServiceBuilder::new(planner).with_configuration(config.clone());
diff --git a/apollo-router/src/query_planner/caching_query_planner.rs b/apollo-router/src/query_planner/caching_query_planner.rs
index 58e01074df..90e88e04ab 100644
--- a/apollo-router/src/query_planner/caching_query_planner.rs
+++ b/apollo-router/src/query_planner/caching_query_planner.rs
@@ -1,10 +1,8 @@
-use std::collections::HashMap;
 use std::hash::Hash;
 use std::hash::Hasher;
 use std::sync::Arc;
 use std::task;

-use apollo_compiler::validation::Valid;
 use futures::future::BoxFuture;
 use indexmap::IndexMap;
 use query_planner::QueryPlannerPlugin;
@@ -61,7 +59,7 @@ pub(crate) struct CachingQueryPlanner {
     >,
     delegate: T,
     schema: Arc,
-    subgraph_schemas: Arc>>>,
+    subgraph_schemas: Arc,
     plugins: Arc,
     enable_authorization_directives: bool,
     config_mode_hash: Arc,
@@ -94,7 +92,7 @@ where
     pub(crate) async fn new(
         delegate: T,
         schema: Arc,
-        subgraph_schemas: Arc>>>,
+        subgraph_schemas: Arc,
         configuration: &Configuration,
         plugins: Plugins,
     ) -> Result, BoxError> {
@@ -339,9 +337,7 @@ where
 }

 impl CachingQueryPlanner {
-    pub(crate) fn subgraph_schemas(
-        &self,
-    ) -> Arc>>> {
+    pub(crate) fn subgraph_schemas(&self) -> Arc {
         self.delegate.subgraph_schemas()
     }

diff --git a/apollo-router/src/query_planner/execution.rs b/apollo-router/src/query_planner/execution.rs
index 12acef865b..6ea1e1c280 100644
--- a/apollo-router/src/query_planner/execution.rs
+++ b/apollo-router/src/query_planner/execution.rs
@@ -1,7 +1,6 @@
 use std::collections::HashMap;
 use std::sync::Arc;

-use apollo_compiler::validation::Valid;
 use futures::future::join_all;
 use futures::prelude::*;
 use tokio::sync::broadcast;
@@ -23,6 +22,7 @@ use crate::json_ext::Path;
 use crate::json_ext::Value;
 use crate::json_ext::ValueExt;
 use crate::plugins::subscription::SubscriptionConfig;
+use crate::query_planner::fetch::SubgraphSchemas;
 use crate::query_planner::FlattenNode;
 use crate::query_planner::Primary;
 use crate::query_planner::CONDITION_ELSE_SPAN_NAME;
@@ -50,7 +50,7 @@ impl QueryPlan {
         service_factory: &'a Arc,
         supergraph_request: &'a Arc>,
         schema: &'a Arc,
-        subgraph_schemas: &'a Arc>>>,
+        subgraph_schemas: &'a Arc,
         sender: mpsc::Sender,
         subscription_handle: Option,
         subscription_config: &'a Option,
@@ -106,7 +106,7 @@ pub(crate) struct ExecutionParameters<'a> {
     pub(crate) context: &'a Context,
     pub(crate) service_factory: &'a Arc,
     pub(crate) schema: &'a Arc,
-    pub(crate) subgraph_schemas: &'a Arc>>>,
+    pub(crate) subgraph_schemas: &'a Arc,
     pub(crate) supergraph_request: &'a Arc>,
     pub(crate) deferred_fetches: &'a HashMap)>>,
     pub(crate) query: &'a Arc,
diff --git a/apollo-router/src/query_planner/fetch.rs b/apollo-router/src/query_planner/fetch.rs
index 3ec89c1e95..b8d64fa2c5 100644
--- a/apollo-router/src/query_planner/fetch.rs
+++ b/apollo-router/src/query_planner/fetch.rs
@@ -1,10 +1,11 @@
-use std::collections::HashMap;
 use std::fmt::Display;
 use std::sync::Arc;

 use apollo_compiler::ast;
+use apollo_compiler::collections::HashMap;
 use apollo_compiler::validation::Valid;
 use apollo_compiler::ExecutableDocument;
+use apollo_compiler::Name;
 use indexmap::IndexSet;
 use serde::Deserialize;
 use serde::Serialize;
@@ -93,7 +94,12 @@ impl From for OperationKind {
     }
 }

-pub(crate) type SubgraphSchemas = HashMap>>;
+pub(crate) type SubgraphSchemas = HashMap;
+
+pub(crate) struct SubgraphSchema {
+    pub(crate) schema: Arc>,
+    pub(crate) implementers_map: HashMap,
+}

 /// A fetch node.
 #[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
@@ -406,7 +412,7 @@ impl FetchNode {
                 if let Some(subgraph_schema) =
                     parameters.subgraph_schemas.get(&service_name.to_string())
                 {
-                    match build_operation_with_aliasing(operation, &ctx_arg, subgraph_schema) {
+                    match build_operation_with_aliasing(operation, &ctx_arg, &subgraph_schema.schema) {
                         Ok(op) => {
                             alias_query_string = op.serialize().no_indent().to_string();
                             alias_query_string.as_str()
@@ -680,7 +686,7 @@ impl FetchNode {
         subgraph_schemas: &SubgraphSchemas,
     ) -> Result<(), ValidationErrors> {
         let schema = &subgraph_schemas[self.service_name.as_ref()];
-        self.operation.init_parsed(schema)?;
+        self.operation.init_parsed(&schema.schema)?;

         Ok(())
     }
@@ -690,11 +696,12 @@ impl FetchNode {
         supergraph_schema_hash: &str,
     ) -> Result<(), ValidationErrors> {
         let schema = &subgraph_schemas[self.service_name.as_ref()];
-        let doc = self.operation.init_parsed(schema)?;
+        let doc = self.operation.init_parsed(&schema.schema)?;

         if let Ok(hash) = QueryHashVisitor::hash_query(
-            schema,
+            &schema.schema,
             supergraph_schema_hash,
+            &schema.implementers_map,
             doc,
             self.operation_name.as_deref(),
         ) {
diff --git a/apollo-router/src/query_planner/query_planner_service.rs b/apollo-router/src/query_planner/query_planner_service.rs
index d94ce3ae0c..8be6890a69 100644
--- a/apollo-router/src/query_planner/query_planner_service.rs
+++ b/apollo-router/src/query_planner/query_planner_service.rs
@@ -1,6 +1,5 @@
 //! Calls out to the apollo-federation crate

-use std::collections::HashMap;
 use std::fmt::Debug;
 use std::ops::ControlFlow;
 use std::sync::Arc;
@@ -9,7 +8,6 @@ use std::task::Poll;
 use std::time::Instant;

 use apollo_compiler::ast;
-use apollo_compiler::validation::Valid;
 use apollo_compiler::Name;
 use apollo_federation::error::FederationError;
 use apollo_federation::error::SingleFederationError;
@@ -42,6 +40,8 @@ use crate::plugins::telemetry::config::ApolloSignatureNormalizationAlgorithm;
 use crate::plugins::telemetry::config::Conf as TelemetryConfig;
 use crate::query_planner::convert::convert_root_query_plan_node;
 use crate::query_planner::fetch::QueryHash;
+use crate::query_planner::fetch::SubgraphSchema;
+use crate::query_planner::fetch::SubgraphSchemas;
 use crate::query_planner::labeler::add_defer_labels;
 use crate::services::layers::query_analysis::ParsedDocument;
 use crate::services::layers::query_analysis::ParsedDocumentInner;
@@ -67,7 +67,7 @@ const INTERNAL_INIT_ERROR: &str = "internal";
 pub(crate) struct QueryPlannerService {
     planner: Arc,
     schema: Arc,
-    subgraph_schemas: Arc>>>,
+    subgraph_schemas: Arc,
     configuration: Arc,
     enable_authorization_directives: bool,
     _federation_instrument: ObservableGauge,
@@ -191,7 +191,15 @@ impl QueryPlannerService {
             planner
                 .subgraph_schemas()
                 .iter()
-                .map(|(name, schema)| (name.to_string(), Arc::new(schema.schema().clone())))
+                .map(|(name, schema)| {
+                    (
+                        name.to_string(),
+                        SubgraphSchema {
+                            implementers_map: schema.schema().implementers_map(),
+                            schema: Arc::new(schema.schema().clone()),
+                        },
+                    )
+                })
                 .collect(),
         );
@@ -218,9 +226,7 @@ impl QueryPlannerService {
         self.schema.clone()
     }

-    pub(crate) fn subgraph_schemas(
-        &self,
-    ) -> Arc>>> {
+    pub(crate) fn subgraph_schemas(&self) -> Arc {
         self.subgraph_schemas.clone()
     }

@@ -383,6 +389,7 @@ impl Service for QueryPlannerService {
             let hash = QueryHashVisitor::hash_query(
                 this.schema.supergraph_schema(),
                 &this.schema.raw_sdl,
+                &this.schema.implementers_map,
                 &executable_document,
                 operation_name.as_deref(),
             )
@@ -508,6 +515,7 @@ impl QueryPlannerService {
         let hash = QueryHashVisitor::hash_query(
             self.schema.supergraph_schema(),
             &self.schema.raw_sdl,
+            &self.schema.implementers_map,
             &executable_document,
             key.operation_name.as_deref(),
         )
@@ -595,6 +603,8 @@ pub(crate) fn metric_rust_qp_init(init_error_kind: Option<&'static str>) {

 #[cfg(test)]
 mod tests {
+    use std::collections::HashMap;
+
     use test_log::test;
     use tower::ServiceExt;

diff --git a/apollo-router/src/query_planner/subscription.rs b/apollo-router/src/query_planner/subscription.rs
index 260f654c5d..f390ba1723 100644
--- a/apollo-router/src/query_planner/subscription.rs
+++ b/apollo-router/src/query_planner/subscription.rs
@@ -280,7 +280,7 @@ impl SubscriptionNode {
         subgraph_schemas: &SubgraphSchemas,
     ) -> Result<(), ValidationErrors> {
         let schema = &subgraph_schemas[self.service_name.as_ref()];
-        self.operation.init_parsed(schema)?;
+        self.operation.init_parsed(&schema.schema)?;
         Ok(())
     }
 }
diff --git a/apollo-router/src/query_planner/tests.rs b/apollo-router/src/query_planner/tests.rs
index b22204f7cd..d7cfaa4e36 100644
--- a/apollo-router/src/query_planner/tests.rs
+++ b/apollo-router/src/query_planner/tests.rs
@@ -1838,8 +1838,14 @@ fn broken_plan_does_not_panic() {
         estimated_size: Default::default(),
     };
     let subgraph_schema = apollo_compiler::Schema::parse_and_validate(subgraph_schema, "").unwrap();
-    let mut subgraph_schemas = HashMap::new();
-    subgraph_schemas.insert("X".to_owned(), Arc::new(subgraph_schema));
+    let mut subgraph_schemas = HashMap::default();
+    subgraph_schemas.insert(
+        "X".to_owned(),
+        query_planner::fetch::SubgraphSchema {
+            implementers_map: subgraph_schema.implementers_map(),
+            schema: Arc::new(subgraph_schema),
+        },
+    );
     let result = Arc::make_mut(&mut plan.root)
         .init_parsed_operations_and_hash_subqueries(&subgraph_schemas, "");
     assert_eq!(
diff --git a/apollo-router/src/router_factory.rs b/apollo-router/src/router_factory.rs
index 0a766a2dc0..49b792e083 100644
--- a/apollo-router/src/router_factory.rs
+++ b/apollo-router/src/router_factory.rs
@@ -326,11 +326,19 @@ impl YamlRouterFactory {
         let span = tracing::info_span!("plugins");

         // Process the plugins.
+        let subgraph_schemas = Arc::new(
+            planner
+                .subgraph_schemas()
+                .iter()
+                .map(|(k, v)| (k.clone(), v.schema.clone()))
+                .collect(),
+        );
+
         let plugins: Arc = Arc::new(
             create_plugins(
                 &configuration,
                 &schema,
-                planner.subgraph_schemas(),
+                subgraph_schemas,
                 initial_telemetry_plugin,
                 extra_plugins,
             )
diff --git a/apollo-router/src/services/execution/service.rs b/apollo-router/src/services/execution/service.rs
index de6057f287..e793178c62 100644
--- a/apollo-router/src/services/execution/service.rs
+++ b/apollo-router/src/services/execution/service.rs
@@ -1,6 +1,5 @@
 //! Implements the Execution phase of the request lifecycle.

-use std::collections::HashMap;
 use std::future::ready;
 use std::pin::Pin;
 use std::sync::Arc;
@@ -9,7 +8,6 @@ use std::task::Poll;
 use std::time::SystemTime;
 use std::time::UNIX_EPOCH;

-use apollo_compiler::validation::Valid;
 use futures::future::BoxFuture;
 use futures::stream::once;
 use futures::Stream;
@@ -47,6 +45,7 @@ use crate::plugins::subscription::APOLLO_SUBSCRIPTION_PLUGIN;
 use crate::plugins::telemetry::apollo::Config as ApolloTelemetryConfig;
 use crate::plugins::telemetry::config::ApolloMetricsReferenceMode;
 use crate::plugins::telemetry::Telemetry;
+use crate::query_planner::fetch::SubgraphSchemas;
 use crate::query_planner::subscription::SubscriptionHandle;
 use crate::services::execution;
 use crate::services::new_service::ServiceFactory;
@@ -62,7 +61,7 @@ use crate::spec::Schema;
 #[derive(Clone)]
 pub(crate) struct ExecutionService {
     pub(crate) schema: Arc,
-    pub(crate) subgraph_schemas: Arc>>>,
+    pub(crate) subgraph_schemas: Arc,
     pub(crate) subgraph_service_factory: Arc,
     /// Subscription config if enabled
     subscription_config: Option,
@@ -632,7 +631,7 @@ async fn consume_responses(
 #[derive(Clone)]
 pub(crate) struct ExecutionServiceFactory {
     pub(crate) schema: Arc,
-    pub(crate) subgraph_schemas: Arc>>>,
+    pub(crate) subgraph_schemas: Arc,
     pub(crate) plugins: Arc,
     pub(crate) subgraph_service_factory: Arc,
 }
diff --git a/apollo-router/src/services/supergraph/service.rs b/apollo-router/src/services/supergraph/service.rs
index 939e48093e..ac5fbd1295 100644
--- a/apollo-router/src/services/supergraph/service.rs
+++ b/apollo-router/src/services/supergraph/service.rs
@@ -553,7 +553,14 @@ async fn subscription_task(
                 // If the configuration was dropped in the meantime, we ignore this update and will
                 // pick up the next one.
                 if let Some(conf) = new_configuration.upgrade() {
-                    let plugins = match create_plugins(&conf, &execution_service_factory.schema, execution_service_factory.subgraph_schemas.clone(), None, None).await {
+                    let subgraph_schemas = Arc::new(
+                        execution_service_factory
+                            .subgraph_schemas
+                            .iter()
+                            .map(|(k, v)| (k.clone(), v.schema.clone()))
+                            .collect(),
+                    );
+                    let plugins = match create_plugins(&conf, &execution_service_factory.schema, subgraph_schemas, None, None).await {
                         Ok(plugins) => Arc::new(plugins),
                         Err(err) => {
                             tracing::error!("cannot re-create plugins with the new configuration (closing existing subscription): {err:?}");
diff --git a/apollo-router/src/spec/query.rs b/apollo-router/src/spec/query.rs
index 4f83d7c8e3..6ddf3ac1df 100644
--- a/apollo-router/src/spec/query.rs
+++ b/apollo-router/src/spec/query.rs
@@ -269,6 +269,7 @@ impl Query {
         let hash = QueryHashVisitor::hash_query(
             schema.supergraph_schema(),
             &schema.raw_sdl,
+            &schema.implementers_map,
             &executable_document,
             operation_name,
         )
@@ -323,10 +324,13 @@ impl Query {
         let operation = get_operation(document, operation_name)?;
         let operation = Operation::from_hir(&operation, schema, &mut defer_stats, &fragments)?;

-        let mut visitor =
-            QueryHashVisitor::new(schema.supergraph_schema(), &schema.raw_sdl, document).map_err(
-                |e| SpecError::QueryHashing(format!("could not calculate the query hash: {e}")),
-            )?;
+        let mut visitor = QueryHashVisitor::new(
+            schema.supergraph_schema(),
+            &schema.raw_sdl,
+            &schema.implementers_map,
+            document,
+        )
+        .map_err(|e| SpecError::QueryHashing(format!("could not calculate the query hash: {e}")))?;
         traverse::document(&mut visitor, document, operation_name).map_err(|e| {
             SpecError::QueryHashing(format!("could not calculate the query hash: {e}"))
         })?;
diff --git a/apollo-router/src/spec/query/change.rs b/apollo-router/src/spec/query/change.rs
index 79e6369499..e8da0caa39 100644
--- a/apollo-router/src/spec/query/change.rs
+++ b/apollo-router/src/spec/query/change.rs
@@ -39,13 +39,13 @@
 //!
 //! This prevents possible collision while hashing multiple things in a sequence. The
 //! `^` character cannot be present in GraphQL names, so this is a good separator.
-use std::collections::HashMap;
-use std::collections::HashSet;
 use std::hash::Hash;
 use std::hash::Hasher;

 use apollo_compiler::ast;
 use apollo_compiler::ast::FieldDefinition;
+use apollo_compiler::collections::HashMap;
+use apollo_compiler::collections::HashSet;
 use apollo_compiler::executable;
 use apollo_compiler::parser::Parser;
 use apollo_compiler::schema;
@@ -79,6 +79,7 @@ pub(crate) struct QueryHashVisitor<'a> {
     // For now, introspection is still handled by the planner, so when an
     // introspection query is hashed, it should take the whole schema into account
     schema_str: &'a str,
+    implementers_map: &'a HashMap,
     hasher: Sha256,
     fragments: HashMap<&'a Name, &'a Node>,
     hashed_types: HashSet,
@@ -95,15 +96,17 @@ impl<'a> QueryHashVisitor<'a> {
     pub(crate) fn new(
         schema: &'a schema::Schema,
         schema_str: &'a str,
+        implementers_map: &'a HashMap,
         executable: &'a executable::ExecutableDocument,
     ) -> Result {
         let mut visitor = Self {
             schema,
             schema_str,
+            implementers_map,
             hasher: Sha256::new(),
             fragments: executable.fragments.iter().collect(),
-            hashed_types: HashSet::new(),
-            hashed_field_definitions: HashSet::new(),
+            hashed_types: HashSet::default(),
+            hashed_field_definitions: HashSet::default(),
             seen_introspection: false,
             // should we just return an error if we do not find those directives?
             join_field_directive_name: Schema::directive_name(
@@ -124,7 +127,7 @@ impl<'a> QueryHashVisitor<'a> {
                 ">=0.1.0",
                 CONTEXT_DIRECTIVE_NAME,
             ),
-            contexts: HashMap::new(),
+            contexts: HashMap::default(),
         };

         visitor.hash_schema()?;
@@ -147,10 +150,11 @@ impl<'a> QueryHashVisitor<'a> {
     pub(crate) fn hash_query(
         schema: &'a schema::Schema,
         schema_str: &'a str,
+        implementers_map: &'a HashMap,
         executable: &'a executable::ExecutableDocument,
         operation_name: Option<&str>,
     ) -> Result, BoxError> {
-        let mut visitor = QueryHashVisitor::new(schema, schema_str, executable)?;
+        let mut visitor = QueryHashVisitor::new(schema, schema_str, implementers_map, executable)?;
         traverse::document(&mut visitor, executable, operation_name)?;
         // hash the entire query string to prevent collisions
         executable.to_string().hash(&mut visitor);
@@ -326,7 +330,7 @@ impl<'a> QueryHashVisitor<'a> {
         }
         "^IMPLEMENTED_INTERFACES_LIST_END".hash(self);

-        if let Some(implementers) = self.schema().implementers_map().get(&i.name) {
+        if let Some(implementers) = self.implementers_map.get(&i.name) {
             "^IMPLEMENTER_OBJECT_LIST".hash(self);

             for object in &implementers.objects {
@@ -651,7 +655,7 @@ impl<'a> QueryHashVisitor<'a> {
     ) -> Result<(), BoxError> {
         "^INTERFACE_IMPL".hash(self);

-        if let Some(implementers) = self.schema.implementers_map().get(&intf.name) {
+        if let Some(implementers) = self.implementers_map.get(&intf.name) {
             "^IMPLEMENTER_LIST".hash(self);
             for object in &implementers.objects {
                 self.hash_type_by_name(object)?;
@@ -840,12 +844,17 @@ mod tests {
             .unwrap()
             .validate(&schema)
             .unwrap();
-        let mut visitor = QueryHashVisitor::new(&schema, schema_str, &exec).unwrap();
+        let implementers_map = schema.implementers_map();
+        let mut visitor =
+            QueryHashVisitor::new(&schema, schema_str, &implementers_map, &exec).unwrap();
         traverse::document(&mut visitor, &exec, None).unwrap();

         (
             hex::encode(visitor.finish()),
-            hex::encode(QueryHashVisitor::hash_query(&schema, schema_str, &exec, None).unwrap()),
+            hex::encode(
+                QueryHashVisitor::hash_query(&schema, schema_str, &implementers_map, &exec, None)
+                    .unwrap(),
+            ),
         )
             .into()
     }
@@ -859,7 +868,9 @@ mod tests {
             .unwrap()
             .validate(&schema)
             .unwrap();
-        let mut visitor = QueryHashVisitor::new(&schema, schema_str, &exec).unwrap();
+        let implementers_map = schema.implementers_map();
+        let mut visitor =
+            QueryHashVisitor::new(&schema, schema_str, &implementers_map, &exec).unwrap();
         traverse::document(&mut visitor, &exec, None).unwrap();

         hex::encode(visitor.finish())