diff --git a/ee/acl/acl_test.go b/ee/acl/acl_test.go index aa33568d4ae..18543cb01ae 100644 --- a/ee/acl/acl_test.go +++ b/ee/acl/acl_test.go @@ -166,6 +166,7 @@ func TestInvalidGetUser(t *testing.T) { require.Equal(t, x.GqlErrorList{{ Message: "couldn't rewrite query getCurrentUser because unable to parse jwt token: token" + " contains an invalid number of segments", + Path: []interface{}{"getCurrentUser"}, }}, currentUser.Errors) } @@ -196,7 +197,7 @@ func TestPasswordReturn(t *testing.T) { func TestGetCurrentUser(t *testing.T) { token := testutil.GrootHttpLogin(adminEndpoint) - + currentUser := getCurrentUser(t, token) currentUser.RequireNoGraphQLErrors(t) require.Equal(t, string(currentUser.Data), `{"getCurrentUser":{"name":"groot"}}`) @@ -231,6 +232,7 @@ func TestCreateAndDeleteUsers(t *testing.T) { require.Equal(t, x.GqlErrorList{{ Message: "couldn't rewrite query for mutation addUser because id alice already exists" + " for type User", + Path: []interface{}{"addUser"}, }}, resp.Errors) checkUserCount(t, resp.Data, 0) diff --git a/graphql/admin/admin.go b/graphql/admin/admin.go index 03a43918730..d9eda4de5bd 100644 --- a/graphql/admin/admin.go +++ b/graphql/admin/admin.go @@ -347,11 +347,11 @@ var ( resolve.LoggingMWMutation, } adminQueryMWConfig = map[string]resolve.QueryMiddlewares{ - "health": {resolve.IpWhitelistingMW4Query, resolve.LoggingMWQuery}, // dgraph checks Guardian auth for health - "state": {resolve.IpWhitelistingMW4Query, resolve.LoggingMWQuery}, // dgraph checks Guardian auth for state - "config": commonAdminQueryMWs, - "listBackups": commonAdminQueryMWs, - "getGQLSchema": commonAdminQueryMWs, + "health": {resolve.IpWhitelistingMW4Query, resolve.LoggingMWQuery}, // dgraph checks Guardian auth for health + "state": {resolve.IpWhitelistingMW4Query, resolve.LoggingMWQuery}, // dgraph checks Guardian auth for state + "config": commonAdminQueryMWs, + "listBackups": commonAdminQueryMWs, + "getGQLSchema": commonAdminQueryMWs, // for queries and mutations related to User/Group, dgraph handles Guardian auth, // so no need to apply GuardianAuth Middleware "queryGroup": {resolve.IpWhitelistingMW4Query, resolve.LoggingMWQuery}, @@ -716,113 +716,64 @@ func (as *adminServer) addConnectedAdminResolvers() { as.rf.WithMutationResolver("updateGQLSchema", func(m schema.Mutation) resolve.MutationResolver { - return &updateSchemaResolver{ - admin: as, - } + return &updateSchemaResolver{admin: as} }). WithQueryResolver("getGQLSchema", func(q schema.Query) resolve.QueryResolver { - getResolver := &getSchemaResolver{ - admin: as, - } - - return resolve.NewQueryResolver( - getResolver, - getResolver, - resolve.StdQueryCompletion()) + return &getSchemaResolver{admin: as} }). WithQueryResolver("queryGroup", func(q schema.Query) resolve.QueryResolver { - return resolve.NewQueryResolver( - qryRw, - dgEx, - resolve.StdQueryCompletion()) + return resolve.NewQueryResolver(qryRw, dgEx) }). WithQueryResolver("queryUser", func(q schema.Query) resolve.QueryResolver { - return resolve.NewQueryResolver( - qryRw, - dgEx, - resolve.StdQueryCompletion()) + return resolve.NewQueryResolver(qryRw, dgEx) }). WithQueryResolver("getGroup", func(q schema.Query) resolve.QueryResolver { - return resolve.NewQueryResolver( - qryRw, - dgEx, - resolve.StdQueryCompletion()) + return resolve.NewQueryResolver(qryRw, dgEx) }). 
WithQueryResolver("getCurrentUser", func(q schema.Query) resolve.QueryResolver { - cuResolver := ¤tUserResolver{ - baseRewriter: qryRw, - } - - return resolve.NewQueryResolver( - cuResolver, - dgEx, - resolve.StdQueryCompletion()) + return resolve.NewQueryResolver(¤tUserResolver{baseRewriter: qryRw}, dgEx) }). WithQueryResolver("getUser", func(q schema.Query) resolve.QueryResolver { - return resolve.NewQueryResolver( - qryRw, - dgEx, - resolve.StdQueryCompletion()) + return resolve.NewQueryResolver(qryRw, dgEx) }). WithQueryResolver("getAllowedCORSOrigins", func(q schema.Query) resolve.QueryResolver { return resolve.QueryResolverFunc(resolveGetCors) }). WithQueryResolver("querySchemaHistory", func(q schema.Query) resolve.QueryResolver { - // Add the desceding order to the created_at to get the schema history in + // Add the descending order to the created_at to get the schema history in // descending order. q.Arguments()["order"] = map[string]interface{}{"desc": "created_at"} - return resolve.NewQueryResolver( - qryRw, - dgEx, - resolve.StdQueryCompletion()) + return resolve.NewQueryResolver(qryRw, dgEx) }). WithMutationResolver("addUser", func(m schema.Mutation) resolve.MutationResolver { - return resolve.NewDgraphResolver( - resolve.NewAddRewriter(), - dgEx, - resolve.StdMutationCompletion(m.Name())) + return resolve.NewDgraphResolver(resolve.NewAddRewriter(), dgEx) }). WithMutationResolver("addGroup", func(m schema.Mutation) resolve.MutationResolver { - return resolve.NewDgraphResolver( - NewAddGroupRewriter(), - dgEx, - resolve.StdMutationCompletion(m.Name())) + return resolve.NewDgraphResolver(NewAddGroupRewriter(), dgEx) }). WithMutationResolver("updateUser", func(m schema.Mutation) resolve.MutationResolver { - return resolve.NewDgraphResolver( - resolve.NewUpdateRewriter(), - dgEx, - resolve.StdMutationCompletion(m.Name())) + return resolve.NewDgraphResolver(resolve.NewUpdateRewriter(), dgEx) }). WithMutationResolver("updateGroup", func(m schema.Mutation) resolve.MutationResolver { - return resolve.NewDgraphResolver( - NewUpdateGroupRewriter(), - dgEx, - resolve.StdMutationCompletion(m.Name())) + return resolve.NewDgraphResolver(NewUpdateGroupRewriter(), dgEx) }). WithMutationResolver("deleteUser", func(m schema.Mutation) resolve.MutationResolver { - return resolve.NewDgraphResolver( - resolve.NewDeleteRewriter(), - dgEx, - resolve.StdDeleteCompletion(m.Name())) + return resolve.NewDgraphResolver(resolve.NewDeleteRewriter(), dgEx) }). WithMutationResolver("deleteGroup", func(m schema.Mutation) resolve.MutationResolver { - return resolve.NewDgraphResolver( - resolve.NewDeleteRewriter(), - dgEx, - resolve.StdDeleteCompletion(m.Name())) + return resolve.NewDgraphResolver(resolve.NewDeleteRewriter(), dgEx) }). 
WithMutationResolver("replaceAllowedCORSOrigins", func(m schema.Mutation) resolve.MutationResolver { return resolve.MutationResolverFunc(resolveReplaceAllowedCORSOrigins) diff --git a/graphql/admin/backup.go b/graphql/admin/backup.go index 6634f986adf..a5d51f7f4ad 100644 --- a/graphql/admin/backup.go +++ b/graphql/admin/backup.go @@ -52,10 +52,11 @@ func resolveBackup(ctx context.Context, m schema.Mutation) (*resolve.Resolved, b return resolve.EmptyResult(m, err), false } - return &resolve.Resolved{ - Data: map[string]interface{}{m.Name(): response("Success", "Backup completed.")}, - Field: m, - }, true + return resolve.DataResult( + m, + map[string]interface{}{m.Name(): response("Success", "Backup completed.")}, + nil, + ), true } func getBackupInput(m schema.Mutation) (*backupInput, error) { diff --git a/graphql/admin/config.go b/graphql/admin/config.go index b4001c6ab20..4f86bf6fa78 100644 --- a/graphql/admin/config.go +++ b/graphql/admin/config.go @@ -19,6 +19,7 @@ package admin import ( "context" "encoding/json" + "strconv" "github.com/dgraph-io/dgraph/graphql/resolve" "github.com/dgraph-io/dgraph/graphql/schema" @@ -54,22 +55,23 @@ func resolveUpdateConfig(ctx context.Context, m schema.Mutation) (*resolve.Resol worker.UpdateLogRequest(*input.LogRequest) } - return &resolve.Resolved{ - Data: map[string]interface{}{m.Name(): response("Success", "Config updated successfully")}, - Field: m, - }, true + return resolve.DataResult( + m, + map[string]interface{}{m.Name(): response("Success", "Config updated successfully")}, + nil, + ), true } func resolveGetConfig(ctx context.Context, q schema.Query) *resolve.Resolved { glog.Info("Got config query through GraphQL admin API") - conf := make(map[string]interface{}) - conf["cacheMb"] = float64(worker.Config.CacheMb) - - return &resolve.Resolved{ - Data: map[string]interface{}{q.Name(): conf}, - Field: q, - } + return resolve.DataResult( + q, + map[string]interface{}{q.Name(): map[string]interface{}{ + "cacheMb": json.Number(strconv.FormatInt(worker.Config.CacheMb, 10)), + }}, + nil, + ) } diff --git a/graphql/admin/cors.go b/graphql/admin/cors.go index dc3a9d09311..646e664a14d 100644 --- a/graphql/admin/cors.go +++ b/graphql/admin/cors.go @@ -47,23 +47,23 @@ func resolveReplaceAllowedCORSOrigins(ctx context.Context, m schema.Mutation) (* if err != nil { return resolve.EmptyResult(m, err), false } - // Aleast one origin is required to add allowlist. Since, no origin is provided, so we'll - // all origin to access dgraph. + // At-least one origin is required to add allowList. Since, no origin is provided, so we'll + // allow all origin to access dgraph. if len(origins) == 0 { origins = append(origins, "*") } if err = edgraph.AddCorsOrigins(ctx, origins); err != nil { return resolve.EmptyResult(m, err), false } - return &resolve.Resolved{ - Data: map[string]interface{}{ + return resolve.DataResult( + m, + map[string]interface{}{ m.Name(): map[string]interface{}{ - "acceptedOrigins": arrayToInterface(origins), + "acceptedOrigins": toInterfaceSlice(origins), }, }, - Field: m, - Err: nil, - }, true + nil, + ), true } // resolveGetCors retrieves cors details from the database. 
@@ -72,21 +72,12 @@ func resolveGetCors(ctx context.Context, q schema.Query) *resolve.Resolved { if err != nil { return resolve.EmptyResult(q, err) } - return &resolve.Resolved{ - Data: map[string]interface{}{ + return resolve.DataResult( + q, + map[string]interface{}{ q.Name(): map[string]interface{}{ - "acceptedOrigins": arrayToInterface(origins), + "acceptedOrigins": toInterfaceSlice(origins), }, }, - Field: q, - } -} - -// arrayToInterface convers array string to array interface -func arrayToInterface(in []string) []interface{} { - out := make([]interface{}, len(in)) - for i, v := range in { - out[i] = v - } - return out + nil) } diff --git a/graphql/admin/draining.go b/graphql/admin/draining.go index 615b918a594..9deb8df889a 100644 --- a/graphql/admin/draining.go +++ b/graphql/admin/draining.go @@ -32,11 +32,13 @@ func resolveDraining(ctx context.Context, m schema.Mutation) (*resolve.Resolved, enable := getDrainingInput(m) x.UpdateDrainingMode(enable) - return &resolve.Resolved{ - Data: map[string]interface{}{ - m.Name(): response("Success", fmt.Sprintf("draining mode has been set to %v", enable))}, - Field: m, - }, true + return resolve.DataResult( + m, + map[string]interface{}{ + m.Name(): response("Success", fmt.Sprintf("draining mode has been set to %v", enable)), + }, + nil, + ), true } func getDrainingInput(m schema.Mutation) bool { diff --git a/graphql/admin/export.go b/graphql/admin/export.go index daf80931202..358bc316c0c 100644 --- a/graphql/admin/export.go +++ b/graphql/admin/export.go @@ -63,20 +63,22 @@ func resolveExport(ctx context.Context, m schema.Mutation) (*resolve.Resolved, b } responseData := response("Success", "Export completed.") - responseData["exportedFiles"] = toGraphQLArray(files) + responseData["exportedFiles"] = toInterfaceSlice(files) - return &resolve.Resolved{ - Data: map[string]interface{}{m.Name(): responseData}, - Field: m, - }, true + return resolve.DataResult( + m, + map[string]interface{}{m.Name(): responseData}, + nil, + ), true } -func toGraphQLArray(s []string) []interface{} { - outputFiles := make([]interface{}, 0, len(s)) - for _, f := range s { - outputFiles = append(outputFiles, f) +// toInterfaceSlice converts []string to []interface{} +func toInterfaceSlice(in []string) []interface{} { + out := make([]interface{}, 0, len(in)) + for _, s := range in { + out = append(out, s) } - return outputFiles + return out } func getExportInput(m schema.Mutation) (*exportInput, error) { diff --git a/graphql/admin/health.go b/graphql/admin/health.go index 30cf6d2b845..d35a04ad421 100644 --- a/graphql/admin/health.go +++ b/graphql/admin/health.go @@ -18,7 +18,6 @@ package admin import ( "context" - "encoding/json" "github.com/dgraph-io/dgraph/edgraph" "github.com/dgraph-io/dgraph/graphql/resolve" @@ -37,12 +36,11 @@ func resolveHealth(ctx context.Context, q schema.Query) *resolve.Resolved { } var health []map[string]interface{} - err = json.Unmarshal(resp.GetJson(), &health) - - return &resolve.Resolved{ - Data: map[string]interface{}{q.Name(): health}, - Field: q, - Err: err, - } + err = resolve.Unmarshal(resp.GetJson(), &health) + return resolve.DataResult( + q, + map[string]interface{}{q.Name(): health}, + err, + ) } diff --git a/graphql/admin/list_backups.go b/graphql/admin/list_backups.go index e550e27a491..18a2b4210e4 100644 --- a/graphql/admin/list_backups.go +++ b/graphql/admin/list_backups.go @@ -76,17 +76,18 @@ func resolveListBackups(ctx context.Context, q schema.Query) *resolve.Resolved { return resolve.EmptyResult(q, err) } var result 
map[string]interface{} - err = json.Unmarshal(b, &result) + err = resolve.Unmarshal(b, &result) if err != nil { return resolve.EmptyResult(q, err) } results = append(results, result) } - return &resolve.Resolved{ - Data: map[string]interface{}{q.Name(): results}, - Field: q, - } + return resolve.DataResult( + q, + map[string]interface{}{q.Name(): results}, + nil, + ) } func getLsBackupInput(q schema.Query) (*lsBackupInput, error) { diff --git a/graphql/admin/login.go b/graphql/admin/login.go index f9c8967d160..fe953ac5e49 100644 --- a/graphql/admin/login.go +++ b/graphql/admin/login.go @@ -50,15 +50,15 @@ func resolveLogin(ctx context.Context, m schema.Mutation) (*resolve.Resolved, bo return resolve.EmptyResult(m, err), false } - return &resolve.Resolved{ - Data: map[string]interface{}{ + return resolve.DataResult( + m, + map[string]interface{}{ m.Name(): map[string]interface{}{ "response": map[string]interface{}{ "accessJWT": jwt.AccessJwt, "refreshJWT": jwt.RefreshJwt}}}, - Field: m, - Err: nil, - }, true + nil, + ), true } diff --git a/graphql/admin/restore.go b/graphql/admin/restore.go index 46f2e225997..4dfb2f6b738 100644 --- a/graphql/admin/restore.go +++ b/graphql/admin/restore.go @@ -73,13 +73,13 @@ func resolveRestore(ctx context.Context, m schema.Mutation) (*resolve.Resolved, wg := &sync.WaitGroup{} err = worker.ProcessRestoreRequest(context.Background(), &req, wg) if err != nil { - return &resolve.Resolved{ - Data: map[string]interface{}{m.Name(): map[string]interface{}{ + return resolve.DataResult( + m, + map[string]interface{}{m.Name(): map[string]interface{}{ "code": "Failure", }}, - Field: m, - Err: schema.GQLWrapLocationf(err, m.Location(), "resolving %s failed", m.Name()), - }, false + schema.GQLWrapLocationf(err, m.Location(), "resolving %s failed", m.Name()), + ), false } go func() { @@ -87,13 +87,14 @@ func resolveRestore(ctx context.Context, m schema.Mutation) (*resolve.Resolved, edgraph.ResetAcl(nil) }() - return &resolve.Resolved{ - Data: map[string]interface{}{m.Name(): map[string]interface{}{ + return resolve.DataResult( + m, + map[string]interface{}{m.Name(): map[string]interface{}{ "code": "Success", "message": "Restore operation started.", }}, - Field: m, - }, true + nil, + ), true } func getRestoreInput(m schema.Mutation) (*restoreInput, error) { diff --git a/graphql/admin/schema.go b/graphql/admin/schema.go index ca9780bb6c0..0e1ac0a5f85 100644 --- a/graphql/admin/schema.go +++ b/graphql/admin/schema.go @@ -17,26 +17,19 @@ package admin import ( - "bytes" "context" "encoding/json" - dgoapi "github.com/dgraph-io/dgo/v200/protos/api" - "github.com/dgraph-io/dgraph/edgraph" - "github.com/dgraph-io/dgraph/gql" "github.com/dgraph-io/dgraph/graphql/resolve" "github.com/dgraph-io/dgraph/graphql/schema" "github.com/dgraph-io/dgraph/query" - "github.com/dgraph-io/dgraph/x" "github.com/dgryski/go-farm" "github.com/golang/glog" ) type getSchemaResolver struct { admin *adminServer - - gqlQuery schema.Query } type updateGQLSchemaInput struct { @@ -84,76 +77,36 @@ func (usr *updateSchemaResolver) Resolve(ctx context.Context, m schema.Mutation) } } - return &resolve.Resolved{ - Data: map[string]interface{}{ + return resolve.DataResult( + m, + map[string]interface{}{ m.Name(): map[string]interface{}{ "gqlSchema": map[string]interface{}{ "id": query.UidToHex(resp.Uid), "schema": input.Set.Schema, "generatedSchema": schHandler.GQLSchema(), }}}, - Field: m, - Err: nil, - }, true + nil), true } -func (gsr *getSchemaResolver) Rewrite(ctx context.Context, - gqlQuery schema.Query) 
([]*gql.GraphQuery, error) { - gsr.gqlQuery = gqlQuery - return nil, nil -} +func (gsr *getSchemaResolver) Resolve(ctx context.Context, q schema.Query) *resolve.Resolved { + var data map[string]interface{} -func (gsr *getSchemaResolver) Execute( - ctx context.Context, - req *dgoapi.Request, - field schema.Field) (*dgoapi.Response, error) { gsr.admin.mux.RLock() defer gsr.admin.mux.RUnlock() - b, err := doQuery(gsr.admin.schema, gsr.gqlQuery) - return &dgoapi.Response{Json: b}, err -} - -func (gsr *getSchemaResolver) CommitOrAbort(ctx context.Context, tc *dgoapi.TxnContext) error { - return nil -} - -func doQuery(gql *gqlSchema, field schema.Field) ([]byte, error) { - var buf bytes.Buffer - x.Check2(buf.WriteString(`{ "`)) - x.Check2(buf.WriteString(field.Name())) - - if gql.ID == "" { - x.Check2(buf.WriteString(`": null }`)) - return buf.Bytes(), nil - } - - x.Check2(buf.WriteString(`": {`)) - - for i, sel := range field.SelectionSet() { - var val []byte - var err error - switch sel.Name() { - case "id": - val, err = json.Marshal(gql.ID) - case "schema": - val, err = json.Marshal(gql.Schema) - case "generatedSchema": - val, err = json.Marshal(gql.GeneratedSchema) - } - x.Check2(val, err) - - if i != 0 { - x.Check2(buf.WriteString(",")) - } - x.Check2(buf.WriteString(`"`)) - x.Check2(buf.WriteString(sel.Name())) - x.Check2(buf.WriteString(`":`)) - x.Check2(buf.Write(val)) + if gsr.admin.schema.ID == "" { + data = map[string]interface{}{q.Name(): nil} + } else { + data = map[string]interface{}{ + q.Name(): map[string]interface{}{ + "id": gsr.admin.schema.ID, + "schema": gsr.admin.schema.Schema, + "generatedSchema": gsr.admin.schema.GeneratedSchema, + }} } - x.Check2(buf.WriteString("}}")) - return buf.Bytes(), nil + return resolve.DataResult(q, data, nil) } func getSchemaInput(m schema.Mutation) (*updateGQLSchemaInput, error) { diff --git a/graphql/admin/shutdown.go b/graphql/admin/shutdown.go index fde742297f5..8d3c30df452 100644 --- a/graphql/admin/shutdown.go +++ b/graphql/admin/shutdown.go @@ -36,8 +36,9 @@ func resolveShutdown(ctx context.Context, m schema.Mutation) (*resolve.Resolved, ServerCloser.Signal() - return &resolve.Resolved{ - Data: map[string]interface{}{m.Name(): response("Success", "Server is shutting down")}, - Field: m, - }, true + return resolve.DataResult( + m, + map[string]interface{}{m.Name(): response("Success", "Server is shutting down")}, + nil, + ), true } diff --git a/graphql/admin/state.go b/graphql/admin/state.go index 5a2597990ec..8385fa57193 100644 --- a/graphql/admin/state.go +++ b/graphql/admin/state.go @@ -56,15 +56,16 @@ func resolveState(ctx context.Context, q schema.Query) *resolve.Resolved { return resolve.EmptyResult(q, err) } var resultState map[string]interface{} - err = json.Unmarshal(b, &resultState) + err = resolve.Unmarshal(b, &resultState) if err != nil { return resolve.EmptyResult(q, err) } - return &resolve.Resolved{ - Data: map[string]interface{}{q.Name(): resultState}, - Field: q, - } + return resolve.DataResult( + q, + map[string]interface{}{q.Name(): resultState}, + nil, + ) } // convertToGraphQLResp converts MembershipState proto to GraphQL layer response diff --git a/graphql/resolve/auth_delete_test.yaml b/graphql/resolve/auth_delete_test.yaml index 8c820eced1e..f70d1e63c9c 100644 --- a/graphql/resolve/auth_delete_test.yaml +++ b/graphql/resolve/auth_delete_test.yaml @@ -344,7 +344,7 @@ } dgquerysec: |- query { - x as var() + var() DeleteLogPayload.log() } diff --git a/graphql/resolve/auth_test.go b/graphql/resolve/auth_test.go index 
dde8c93eccc..3bc5e0358bc 100644 --- a/graphql/resolve/auth_test.go +++ b/graphql/resolve/auth_test.go @@ -734,7 +734,7 @@ func checkAddUpdateCase( skipAuth: tcase.SkipAuth, length: length, } - resolver := NewDgraphResolver(rewriter(), ex, StdMutationCompletion(mut.ResponseName())) + resolver := NewDgraphResolver(rewriter(), ex) // -- Act -- resolved, _ := resolver.Resolve(ctx, mut) diff --git a/graphql/resolve/custom_mutation_test.yaml b/graphql/resolve/custom_mutation_test.yaml index 0a6523651cb..25388aff27a 100644 --- a/graphql/resolve/custom_mutation_test.yaml +++ b/graphql/resolve/custom_mutation_test.yaml @@ -59,7 +59,8 @@ }, { "id": "0x3", - "name": "Mov2" + "name": "Mov2", + "director": [] } ] } diff --git a/graphql/resolve/custom_query_test.yaml b/graphql/resolve/custom_query_test.yaml index d83f68135c9..dc3b092c0b4 100644 --- a/graphql/resolve/custom_query_test.yaml +++ b/graphql/resolve/custom_query_test.yaml @@ -46,7 +46,8 @@ }, { "id": "0x3", - "name": "Star Trek" + "name": "Star Trek", + "director": [] } ] } @@ -89,18 +90,11 @@ resolvedresponse: | { "myFavoriteMoviesPart2": [ - { - "id": "0x1", - "director": [ - { - "id": "0x2", - "name": "George Lucas" - } - ] - }, + null, { "id": "0x3", - "name": "Star Trek" + "name": "Star Trek", + "director": [] } ] } diff --git a/graphql/resolve/mutation.go b/graphql/resolve/mutation.go index 08fc9f2cfdf..bb0ab183362 100644 --- a/graphql/resolve/mutation.go +++ b/graphql/resolve/mutation.go @@ -146,15 +146,10 @@ func (mr MutationResolverFunc) Resolve(ctx context.Context, m schema.Mutation) ( // 2) execute the mutation with me (return error if failed) // 3) write a query for the mutation with mr (return error if failed) // 4) execute the query with qe (return error if failed) -// 5) process the result with rc -func NewDgraphResolver( - mr MutationRewriter, - ex DgraphExecutor, - rc ResultCompleter) MutationResolver { +func NewDgraphResolver(mr MutationRewriter, ex DgraphExecutor) MutationResolver { return &dgraphResolver{ mutationRewriter: mr, executor: ex, - resultCompleter: rc, } } @@ -162,7 +157,6 @@ func NewDgraphResolver( type dgraphResolver struct { mutationRewriter MutationRewriter executor DgraphExecutor - resultCompleter ResultCompleter } func (mr *dgraphResolver) Resolve(ctx context.Context, m schema.Mutation) (*Resolved, bool) { @@ -184,7 +178,6 @@ func (mr *dgraphResolver) Resolve(ctx context.Context, m schema.Mutation) (*Reso defer timer.Stop() resolved, success := mr.rewriteAndExecute(ctx, m) - mr.resultCompleter.Complete(ctx, resolved) resolverTrace.Dgraph = resolved.Extensions.Tracing.Execution.Resolvers[0].Dgraph resolved.Extensions.Tracing.Execution.Resolvers[0] = resolverTrace return resolved, success @@ -234,8 +227,9 @@ func (mr *dgraphResolver) rewriteAndExecute(ctx context.Context, emptyResult := func(err error) *Resolved { return &Resolved{ - // all the standard mutations are nullable - Data: []byte(`{"` + mutation.ResponseName() + `":null}`), + // all the standard mutations are nullable objects, so Data should pretty-much be + // {"mutAlias":null} everytime. + Data: mutation.NullResponse(), Field: mutation, // there is no completion down the pipeline, so error's path should be prepended with // mutation's alias before returning the response. 
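Note: with Resolved.Data now holding raw JSON bytes, the mutation error path returns mutation.NullResponse() instead of the hand-built null literal it replaces. NullResponse itself lives in the schema package and is not part of this diff; the sketch below only illustrates the behaviour the surrounding comments describe, with nullResponse as a hypothetical stand-in:

package main

import "fmt"

// nullResponse is a hypothetical stand-in for mutation.NullResponse() as the
// comments above describe it: standard mutation payloads are nullable objects,
// so the error-path data is simply {"<responseName>":null}; a non-nullable
// field would instead yield nil so the null propagates to the parent.
func nullResponse(responseName string, nullable bool) []byte {
	if !nullable {
		return nil
	}
	return []byte(`{"` + responseName + `":null}`)
}

func main() {
	fmt.Println(string(nullResponse("addUser", true))) // {"addUser":null}
}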
@@ -388,11 +382,15 @@ func completeMutationResult(mutation schema.Mutation, qryResult []byte, numUids x.Check2(buf.WriteString(`"Deleted"`)) } case schema.NumUid: + // Although theoretically it is possible that numUids can be out of the int32 range but + // we don't need to apply coercion rules here as per Int type because carrying out a + // mutation which mutates more than 2 billion uids doesn't seem a practical case. + // So, we are skipping coercion here. x.Check2(buf.WriteString(strconv.Itoa(numUids))) default: // this has to be queryField if len(qryResult) == 0 { // don't write null, instead write [] as query field is always a nullable list - x.Check2(buf.WriteString("[]")) + x.Check2(buf.Write(schema.JsonEmptyList)) } else { // need to write only the value returned for query field, so need to remove the JSON // key till colon (:) and also the ending brace }. diff --git a/graphql/resolve/mutation_test.go b/graphql/resolve/mutation_test.go index d0ea8d7e0eb..27e95e00c7d 100644 --- a/graphql/resolve/mutation_test.go +++ b/graphql/resolve/mutation_test.go @@ -328,13 +328,11 @@ func TestCustomHTTPMutation(t *testing.T) { gqlMutation := test.GetMutation(t, op) client := newClient(t, tcase) - resolver := NewHTTPMutationResolver(client, StdQueryCompletion()) + resolver := NewHTTPMutationResolver(client) resolved, isResolved := resolver.Resolve(context.Background(), gqlMutation) require.True(t, isResolved) - b, err := json.Marshal(resolved.Data) - require.NoError(t, err) - testutil.CompareJSON(t, tcase.ResolvedResponse, string(b)) + testutil.CompareJSON(t, tcase.ResolvedResponse, string(resolved.Data)) }) } } diff --git a/graphql/resolve/query.go b/graphql/resolve/query.go index 187c26d9d4d..c2f681c2fca 100644 --- a/graphql/resolve/query.go +++ b/graphql/resolve/query.go @@ -55,17 +55,15 @@ func (qr QueryResolverFunc) Resolve(ctx context.Context, query schema.Query) *Re // NewQueryResolver creates a new query resolver. The resolver runs the pipeline: // 1) rewrite the query using qr (return error if failed) -// 2) execute the rewritten query with qe (return error if failed) -// 3) process the result with rc -func NewQueryResolver(qr QueryRewriter, ex DgraphExecutor, rc ResultCompleter) QueryResolver { - return &queryResolver{queryRewriter: qr, executor: ex, resultCompleter: rc} +// 2) execute the rewritten query with ex (return error if failed) +func NewQueryResolver(qr QueryRewriter, ex DgraphExecutor) QueryResolver { + return &queryResolver{queryRewriter: qr, executor: ex} } // a queryResolver can resolve a single GraphQL query field. 
type queryResolver struct { - queryRewriter QueryRewriter - executor DgraphExecutor - resultCompleter ResultCompleter + queryRewriter QueryRewriter + executor DgraphExecutor } func (qr *queryResolver) Resolve(ctx context.Context, query schema.Query) *Resolved { @@ -84,11 +82,6 @@ func (qr *queryResolver) Resolve(ctx context.Context, query schema.Query) *Resol defer timer.Stop() resolved := qr.rewriteAndExecute(ctx, query) - if resolved.Data == nil { - resolved.Data = map[string]interface{}{query.Name(): nil} - } - - qr.resultCompleter.Complete(ctx, resolved) resolverTrace.Dgraph = resolved.Extensions.Tracing.Execution.Resolvers[0].Dgraph resolved.Extensions.Tracing.Execution.Resolvers[0] = resolverTrace return resolved @@ -107,14 +100,12 @@ func (qr *queryResolver) rewriteAndExecute(ctx context.Context, query schema.Que } emptyResult := func(err error) *Resolved { - var nullVal string - if query.Type().ListType() != nil { - nullVal = "[]" - } else { - nullVal = "null" - } return &Resolved{ - Data: []byte(`{"` + query.ResponseName() + `":` + nullVal + `}`), + // all the auto-generated queries are nullable, but users may define queries with + // @custom(dql: ...) which may be non-nullable. So, we need to set the Data field + // only if the query was nullable and keep it nil if it was non-nullable. + // query.NullResponse() method handles that. + Data: query.NullResponse(), Field: query, Err: schema.SetPathIfEmpty(err, query.ResponseName()), Extensions: ext, @@ -172,17 +163,10 @@ func (qr *queryResolver) rewriteAndExecute(ctx context.Context, query schema.Que func resolveIntrospection(ctx context.Context, q schema.Query) *Resolved { data, err := schema.Introspect(q) - - var result map[string]interface{} - var err2 error - if len(data) > 0 { - err2 = json.Unmarshal(data, &result) - } - return &Resolved{ - Data: result, + Data: data, Field: q, - Err: schema.AppendGQLErrs(err, err2), + Err: err, } } diff --git a/graphql/resolve/query_test.go b/graphql/resolve/query_test.go index 6e9c6a768b0..b9355318360 100644 --- a/graphql/resolve/query_test.go +++ b/graphql/resolve/query_test.go @@ -155,11 +155,10 @@ func TestCustomHTTPQuery(t *testing.T) { gqlQuery := test.GetQuery(t, op) client := newClient(t, tcase) - resolver := NewHTTPQueryResolver(client, StdQueryCompletion()) + resolver := NewHTTPQueryResolver(client) resolved := resolver.Resolve(context.Background(), gqlQuery) - b, err := json.Marshal(resolved.Data) - require.NoError(t, err) - testutil.CompareJSON(t, tcase.ResolvedResponse, string(b)) + + testutil.CompareJSON(t, tcase.ResolvedResponse, string(resolved.Data)) }) } } diff --git a/graphql/resolve/resolver.go b/graphql/resolve/resolver.go index f8feb060de4..6cc70bd26c8 100644 --- a/graphql/resolve/resolver.go +++ b/graphql/resolve/resolver.go @@ -20,7 +20,6 @@ import ( "bytes" "context" "encoding/json" - "fmt" "io" "io/ioutil" "math" @@ -95,13 +94,6 @@ type ResolverFactory interface { WithSchemaIntrospection() ResolverFactory } -// A ResultCompleter can take a []byte slice representing an intermediate result -// in resolving field and applies a completion step - for example, apply GraphQL -// error propagation or massaging error paths. -type ResultCompleter interface { - Complete(ctx context.Context, resolved *Resolved) -} - // RequestResolver can process GraphQL requests and write GraphQL JSON responses. // A schema.Request may contain any number of queries or mutations (never both). 
// RequestResolver.Resolve() resolves all of them by finding the resolved answers @@ -154,7 +146,7 @@ type adminExecutor struct { // A Resolved is the result of resolving a single field - generally a query or mutation. type Resolved struct { - Data interface{} + Data []byte Field schema.Field Err error Extensions *schema.Extensions @@ -165,15 +157,6 @@ type restErr struct { Errors x.GqlErrorList `json:"errors,omitempty"` } -// CompletionFunc is an adapter that allows us to compose completions and build a -// ResultCompleter from a function. Based on the http.HandlerFunc pattern. -type CompletionFunc func(ctx context.Context, resolved *Resolved) - -// Complete calls cf(ctx, resolved) -func (cf CompletionFunc) Complete(ctx context.Context, resolved *Resolved) { - cf(ctx, resolved) -} - // NewDgraphExecutor builds a DgraphExecutor for proxying requests through dgraph. func NewDgraphExecutor() DgraphExecutor { return newDgraphExecutor(&dgraph.DgraphEx{}) @@ -240,12 +223,8 @@ func (rf *resolverFactory) WithSchemaIntrospection() ResolverFactory { WithMutationResolver("__typename", func(m schema.Mutation) MutationResolver { return MutationResolverFunc(func(ctx context.Context, m schema.Mutation) (*Resolved, bool) { - return &Resolved{ - Data: map[string]interface{}{"__typename": "Mutation"}, - Field: m, - Err: nil, - Extensions: nil, - }, resolverSucceeded + return DataResult(m, map[string]interface{}{"__typename": "Mutation"}, nil), + resolverSucceeded }) }) } @@ -258,7 +237,7 @@ func (rf *resolverFactory) WithConventionResolvers( queries = append(queries, s.Queries(schema.AggregateQuery)...) for _, q := range queries { rf.WithQueryResolver(q, func(q schema.Query) QueryResolver { - return NewQueryResolver(fns.Qrw, fns.Ex, StdQueryCompletion()) + return NewQueryResolver(fns.Qrw, fns.Ex) }) } @@ -267,32 +246,32 @@ func (rf *resolverFactory) WithConventionResolvers( return NewHTTPQueryResolver(&http.Client{ // TODO - This can be part of a config later. Timeout: time.Minute, - }, StdQueryCompletion()) + }) }) } for _, q := range s.Queries(schema.DQLQuery) { rf.WithQueryResolver(q, func(q schema.Query) QueryResolver { // DQL queries don't need any QueryRewriter - return NewQueryResolver(nil, fns.Ex, StdQueryCompletion()) + return NewQueryResolver(nil, fns.Ex) }) } for _, m := range s.Mutations(schema.AddMutation) { rf.WithMutationResolver(m, func(m schema.Mutation) MutationResolver { - return NewDgraphResolver(fns.Arw(), fns.Ex, StdMutationCompletion(m.Name())) + return NewDgraphResolver(fns.Arw(), fns.Ex) }) } for _, m := range s.Mutations(schema.UpdateMutation) { rf.WithMutationResolver(m, func(m schema.Mutation) MutationResolver { - return NewDgraphResolver(fns.Urw(), fns.Ex, StdMutationCompletion(m.Name())) + return NewDgraphResolver(fns.Urw(), fns.Ex) }) } for _, m := range s.Mutations(schema.DeleteMutation) { rf.WithMutationResolver(m, func(m schema.Mutation) MutationResolver { - return NewDgraphResolver(fns.Drw, fns.Ex, StdDeleteCompletion(m.Name())) + return NewDgraphResolver(fns.Drw, fns.Ex) }) } @@ -301,7 +280,7 @@ func (rf *resolverFactory) WithConventionResolvers( return NewHTTPMutationResolver(&http.Client{ // TODO - This can be part of a config later. 
Timeout: time.Minute, - }, StdQueryCompletion()) + }) }) } @@ -343,21 +322,6 @@ func NewResolverFactory( } } -// StdQueryCompletion is the completion steps that get run for queries -func StdQueryCompletion() CompletionFunc { - return noopCompletion -} - -// StdMutationCompletion is the completion steps that get run for add and update mutations -func StdMutationCompletion(name string) CompletionFunc { - return noopCompletion -} - -// StdDeleteCompletion is the completion steps that get run for delete mutations -func StdDeleteCompletion(name string) CompletionFunc { - return noopCompletion -} - func (rf *resolverFactory) queryResolverFor(query schema.Query) QueryResolver { rf.RLock() defer rf.RUnlock() @@ -576,19 +540,8 @@ func addResult(resp *schema.Response, res *Resolved) { // According to GraphQL spec, out of all the queries in the request, if any one query // returns null but expected return type is non-nullable then we set root data to null. resp.SetDataNull() - } else if b, ok := res.Data.([]byte); ok { - resp.AddData(b) } else { - path := make([]interface{}, 0, maxPathLength(res.Field)) - //var b []byte - var gqlErr x.GqlErrorList - - if res.Data != nil { - b, gqlErr = completeObject(path, []schema.Field{res.Field}, - res.Data.(map[string]interface{})) - } - resp.WithError(gqlErr) - resp.AddData(b) + resp.AddData(res.Data) } resp.WithError(res.Err) @@ -799,7 +752,8 @@ func completeDgraphResult( } return &Resolved{ - Data: valToComplete, + // TODO (cleanup): remove this function altogether, not needed anymore + Data: nil, // valToComplete Field: field, Err: errs, } @@ -1304,6 +1258,9 @@ func completeAlias(field schema.Field, buf *bytes.Buffer) { // completeObject builds a json GraphQL result object for the current query level. // It returns a bracketed json object like { f1:..., f2:..., ... }. +// At present, it is only used for building custom results by: +// * Admin Server +// * @custom(http: {...}) query/mutation // // fields are all the fields from this bracketed level in the GraphQL query, e.g: // { @@ -1344,66 +1301,28 @@ func completeObject( var buf bytes.Buffer comma := "" - // Below map keeps track of fields which have been seen as part of + // seenField keeps track of fields which have been seen as part of // interface to avoid double entry in the resulting response seenField := make(map[string]bool) x.Check2(buf.WriteRune('{')) - dgraphTypes, ok := res["dgraph.type"].([]interface{}) + // @custom(http: {...}) query/mutation results may return __typename in response for abstract + // fields, lets use that information if present. + typename, ok := res[schema.Typename].(string) + var dgraphTypes []string + if ok && len(typename) > 0 { + dgraphTypes = []string{typename} + } - // fieldSeenCount is to keep track the number of times a specific field - // has been seen till now. It is used in generating Unique Dgraph Alias - // to extract out the corresponding value of field from the response. - fieldSeenCount := make(map[string]int) for _, f := range fields { - if f.Skip() || !f.Include() { - continue - } - - includeField := true - // If typ is an interface, and dgraphTypes contains another type, then we ignore - // fields which don't start with that type. This would happen when multiple - // fragments (belonging to different types) are requested within a query for an interface. - - // If the dgraphPredicate doesn't start with the typ.Name(), then this field belongs to - // a concrete type, lets check that it has inputType as the prefix, otherwise skip it. 
- if len(dgraphTypes) > 0 { - includeField = f.IncludeInterfaceField(dgraphTypes) - } - if _, ok := seenField[f.ResponseName()]; ok { - includeField = false - } - if !includeField { + if f.SkipField(dgraphTypes, seenField) { continue } x.Check2(buf.WriteString(comma)) completeAlias(f, &buf) - seenField[f.ResponseName()] = true - - var uniqueDgraphAlias string - // In case of InbuiltType or Enums, only one alias was passed into the dgraph query. - // So just map its value to all of its occurences. - if f.Type().IsInbuiltOrEnumType() { - uniqueDgraphAlias = f.DgraphAlias() - } else { - uniqueDgraphAlias = generateUniqueDgraphAlias(f, fieldSeenCount) - } - val := res[f.Name()] // temp change just for making admin work - // Handle aggregate queries: - // Aggregate Fields in DQL response don't follow the same response as other queries. - // Create a map aggregateVal and store response of aggregate fields in a way which - // GraphQL aggregate fields expect it to be. completeValue function called later on - // will convert then fill up the GraphQL response. - if f.IsAggregateField() { - aggregateVal := make(map[string]interface{}) - for _, aggregateField := range f.SelectionSet() { - aggregateFieldName := aggregateField.Name() - aggregateVal[aggregateFieldName] = res[aggregateFieldName+"_"+uniqueDgraphAlias] - } - val = aggregateVal - } + val := res[f.Name()] if f.Name() == schema.Typename { // From GraphQL spec: // https://graphql.github.io/graphql-spec/June2018/#sec-Type-Name-Introspection @@ -1411,56 +1330,19 @@ func completeObject( // meta‐field __typename: String! when querying against any Object, Interface, // or Union. It returns the name of the object type currently being queried." - // If we have dgraph.type information, we will use that to figure out the type + // If we have __typename information, we will use that to figure out the type // otherwise we will get it from the schema. - if ok { - val = f.TypeName(dgraphTypes) - } else { - val = f.GetObjectName() - } + val = f.TypeName(dgraphTypes) } - // Check that we should check that data should be of list type when we expect - // f.Type().ListType() to be non-nil. + // Check that data should be of list type when we expect f.Type().ListType() to be non-nil. if val != nil && f.Type().ListType() != nil { switch val.(type) { case []interface{}, []map[string]interface{}: default: // We were expecting a list but got a value which wasn't a list. Lets return an // error. - return nil, x.GqlErrorList{&x.GqlError{ - Message: schema.ErrExpectedList, - Locations: []x.Location{f.Location()}, - Path: copyPath(path), - }} - } - } - - // Handle the case of empty data in Aggregate Queries. If count of data is equal - // to 0, set the val map to nil. This makes the aggregateField return null instead - // of returning "0.0000" for Min, Max function on strings and 0 for Min, Max functions - // on integers/float. - if strings.HasSuffix(f.Type().Name(), "AggregateResult") && val != nil { - var count json.Number - countVal := val.(map[string]interface{})["count"] - if countVal == nil { - // This case may happen in case of auth queries when the user does not have - // sufficient permission to query aggregate fields. We set val to nil in this - // case - val = nil - } else { - if count, ok = countVal.(json.Number); !ok { - // This is to handle case in which countVal is of any other type than - // json.Number. This should never happen. We return an error. 
- return nil, x.GqlErrorList{&x.GqlError{ - Message: "Expected count field of type json.Number inside Aggregate Field", - Locations: []x.Location{f.Location()}, - Path: copyPath(path), - }} - } - if count == "0" { - val = nil - } + return nil, x.GqlErrorList{f.GqlErrorf(path, schema.ErrExpectedList)} } } @@ -1470,11 +1352,10 @@ func completeObject( if !f.Type().Nullable() { return nil, errs } - completed = []byte(`null`) + completed = schema.JsonNull } x.Check2(buf.Write(completed)) comma = ", " - fieldSeenCount[f.DgraphAlias()]++ } x.Check2(buf.WriteRune('}')) @@ -1492,26 +1373,13 @@ func completeValue( case map[string]interface{}: switch field.Type().Name() { case "String", "ID", "Boolean", "Float", "Int", "Int64", "DateTime": - return nil, x.GqlErrorList{&x.GqlError{ - Message: schema.ErrExpectedScalar, - Locations: []x.Location{field.Location()}, - Path: copyPath(path), - }} + return nil, x.GqlErrorList{field.GqlErrorf(path, schema.ErrExpectedScalar)} } enumValues := field.EnumValues() if len(enumValues) > 0 { - return nil, x.GqlErrorList{&x.GqlError{ - Message: schema.ErrExpectedScalar, - Locations: []x.Location{field.Location()}, - Path: copyPath(path), - }} - } - - if field.Type().IsGeo() { - return completeGeoObject(path, field, val) - } else { - return completeObject(path, field.SelectionSet(), val) + return nil, x.GqlErrorList{field.GqlErrorf(path, schema.ErrExpectedScalar)} } + return completeObject(path, field.SelectionSet(), val) case []interface{}: return completeList(path, field, val) case []map[string]interface{}: @@ -1524,219 +1392,68 @@ func completeValue( return completeList(path, field, listVal) default: if val == nil { - if field.Type().ListType() != nil { - // We could choose to set this to null. This is our decision, not - // anything required by the GraphQL spec. - // - // However, if we query, for example, for a persons's friends with - // some restrictions, and there aren't any, is that really a case to - // set this at null and error if the list is required? What - // about if an person has just been added and doesn't have any friends? - // Doesn't seem right to add null and cause error propagation. - // - // Seems best if we pick [], rather than null, as the list value if - // there's nothing in the Dgraph result. - return []byte("[]"), nil - } - - if field.Type().Nullable() { - return []byte("null"), nil + if b := field.NullValue(); b != nil { + return b, nil } - gqlErr := x.GqlErrorf(schema.ErrExpectedNonNull, field.Name(), field.Type()).WithLocations(field.Location()) - gqlErr.Path = copyPath(path) - return nil, x.GqlErrorList{gqlErr} + return nil, x.GqlErrorList{field.GqlErrorf(path, schema.ErrExpectedNonNull, + field.Name(), field.Type())} } // val is a scalar val, gqlErr := coerceScalar(val, field, path) - if len(gqlErr) != 0 { + if len(gqlErr) > 0 { return nil, gqlErr } // Can this ever error? We can't have an unsupported type or value because - // we just unmarshaled this val. + // we just unmarshalled this val. b, err := json.Marshal(val) if err != nil { - gqlErr := x.GqlErrorf( + gqlErr := x.GqlErrorList{field.GqlErrorf(path, "Error marshalling value for field '%s' (type %s). "+ "Resolved as null (which may trigger GraphQL error propagation) ", - field.Name(), field.Type()). 
- WithLocations(field.Location()) - gqlErr.Path = copyPath(path) + field.Name(), field.Type())} if field.Type().Nullable() { - return []byte("null"), x.GqlErrorList{gqlErr} + return schema.JsonNull, gqlErr } - return nil, x.GqlErrorList{gqlErr} + return nil, gqlErr } return b, nil } } -// completeGeoObject builds a json GraphQL result object for the underlying geo type. -// Currently, it supports Point, Polygon and MultiPolygon. -func completeGeoObject(path []interface{}, field schema.Field, - val map[string]interface{}) ([]byte, x.GqlErrorList) { - coordinate, _ := val[schema.Coordinates].([]interface{}) - if coordinate == nil { - gqlErr := x.GqlErrorf(schema.ErrExpectedNonNull, field.Name(), - field.Type()).WithLocations(field.Location()) - gqlErr.Path = copyPath(path) - return nil, x.GqlErrorList{gqlErr} - } - - typ, _ := val["type"].(string) - var buf bytes.Buffer - switch typ { - case schema.Point: - completePoint(field, coordinate, &buf) - case schema.Polygon: - completePolygon(field, coordinate, &buf) - case schema.MultiPolygon: - completeMultiPolygon(field, coordinate, &buf) - } - - return buf.Bytes(), nil -} - -// completePoint takes in coordinates from dgraph response like [12.32, 123.32], and builds -// a JSON GraphQL result object for Point like { "longitude" : 12.32 , "latitude" : 123.32 }. -func completePoint(field schema.Field, coordinate []interface{}, buf *bytes.Buffer) { - comma := "" - - x.Check2(buf.WriteRune('{')) - for _, f := range field.SelectionSet() { - x.Check2(buf.WriteString(comma)) - completeAlias(f, buf) - - switch f.Name() { - case schema.Longitude: - x.Check2(buf.WriteString(fmt.Sprintf("%v", coordinate[0]))) - case schema.Latitude: - x.Check2(buf.WriteString(fmt.Sprintf("%v", coordinate[1]))) - case schema.Typename: - x.Check2(buf.WriteString(`"Point"`)) - } - comma = "," - } - x.Check2(buf.WriteRune('}')) -} - -// completePolygon converts the Dgraph result to GraphQL Polygon type. 
-// Dgraph output: coordinate: [[[22.22,11.11],[16.16,15.15],[21.21,20.2]],[[22.28,11.18],[16.18,15.18],[21.28,20.28]]] -// Graphql output: { coordinates: [ { points: [{ latitude: 11.11, longitude: 22.22}, { latitude: 15.15, longitude: 16.16} , { latitude: 20.20, longitude: 21.21} ]}, { points: [{ latitude: 11.18, longitude: 22.28}, { latitude: 15.18, longitude: 16.18} , { latitude: 20.28, longitude: 21.28}]} ] } -func completePolygon(field schema.Field, polygon []interface{}, buf *bytes.Buffer) { - comma1 := "" - - x.Check2(buf.WriteRune('{')) - for _, f1 := range field.SelectionSet() { - x.Check2(buf.WriteString(comma1)) - completeAlias(f1, buf) - - switch f1.Name() { - case schema.Coordinates: - x.Check2(buf.WriteRune('[')) - comma2 := "" - - for _, ring := range polygon { - x.Check2(buf.WriteString(comma2)) - x.Check2(buf.WriteRune('{')) - comma3 := "" - - for _, f2 := range f1.SelectionSet() { - x.Check2(buf.WriteString(comma3)) - completeAlias(f2, buf) - - switch f2.Name() { - case schema.Points: - x.Check2(buf.WriteRune('[')) - comma4 := "" - - r, _ := ring.([]interface{}) - for _, point := range r { - x.Check2(buf.WriteString(comma4)) - - p, _ := point.([]interface{}) - completePoint(f2, p, buf) - - comma4 = "," - } - x.Check2(buf.WriteRune(']')) - case schema.Typename: - x.Check2(buf.WriteString(`"PointList"`)) - } - comma3 = "," - } - x.Check2(buf.WriteRune('}')) - comma2 = "," - } - x.Check2(buf.WriteRune(']')) - case schema.Typename: - x.Check2(buf.WriteString(`"Polygon"`)) - } - comma1 = "," - } - x.Check2(buf.WriteRune('}')) -} - -// completeMultiPolygon converts the Dgraph result to GraphQL MultiPolygon type. -func completeMultiPolygon(field schema.Field, multiPolygon []interface{}, buf *bytes.Buffer) { - comma1 := "" - - x.Check2(buf.WriteRune('{')) - for _, f := range field.SelectionSet() { - x.Check2(buf.WriteString(comma1)) - completeAlias(f, buf) - - switch f.Name() { - case schema.Polygons: - x.Check2(buf.WriteRune('[')) - comma2 := "" - - for _, polygon := range multiPolygon { - x.Check2(buf.WriteString(comma2)) - - p, _ := polygon.([]interface{}) - completePolygon(f, p, buf) - - comma2 = "," - } - x.Check2(buf.WriteRune(']')) - case schema.Typename: - x.Check2(buf.WriteString(`"MultiPolygon"`)) - } - comma1 = "," - } - x.Check2(buf.WriteRune('}')) -} - // coerceScalar coerces a scalar value to field.Type() if possible according to the coercion rules -// defined in the GraphQL spec. If this is not possible, then it returns an error. +// defined in the GraphQL spec. If this is not possible, then it returns an error. The crux of +// coercion rules defined in the spec is to not lose information during coercion. +// Note that, admin server specifically uses these: +// * json.Number (Query.config.cacheMb) +// * resolve.Unmarshal() everywhere else +// And, @custom(http: {...}) query/mutation would always use resolve.Unmarshal(). +// Now, resolve.Unmarshal() can only give one of the following types for scalars: +// * bool +// * string +// * json.Number (because it uses custom JSON decoder which preserves number precision) +// So, we need to consider only these cases at present. func coerceScalar(val interface{}, field schema.Field, path []interface{}) (interface{}, x.GqlErrorList) { valueCoercionError := func(val interface{}) x.GqlErrorList { - gqlErr := x.GqlErrorf( + return x.GqlErrorList{field.GqlErrorf(path, "Error coercing value '%+v' for field '%s' to type %s.", - val, field.Name(), field.Type().Name()). 
- WithLocations(field.Location()) - gqlErr.Path = copyPath(path) - return x.GqlErrorList{gqlErr} + val, field.Name(), field.Type().Name())} } switch field.Type().Name() { case "String", "ID": switch v := val.(type) { - case float64: - val = strconv.FormatFloat(v, 'f', -1, 64) - case int64: - val = strconv.FormatInt(v, 10) case bool: val = strconv.FormatBool(v) case string: + // do nothing, val is already string case json.Number: val = v.String() default: @@ -1744,9 +1461,10 @@ func coerceScalar(val interface{}, field schema.Field, path []interface{}) (inte } case "Boolean": switch v := val.(type) { + case bool: + // do nothing, val is already bool case string: val = len(v) > 0 - case bool: case json.Number: valFloat, _ := v.Float64() val = valFloat != 0 @@ -1755,19 +1473,6 @@ func coerceScalar(val interface{}, field schema.Field, path []interface{}) (inte } case "Int": switch v := val.(type) { - case float64: - // The spec says that we can coerce a Float value to Int, if we don't lose information. - // See: https: //spec.graphql.org/June2018/#sec-Float - // Lets try to see if this number could be converted to int32 without losing - // information, otherwise return error. - // See: https://github.com/golang/go/issues/19405 to understand why the comparison - // should be done after double conversion. - i32Val := int32(v) - if v == float64(i32Val) { - val = i32Val - } else { - return nil, valueCoercionError(v) - } case bool: if v { val = 1 @@ -1775,42 +1480,29 @@ func coerceScalar(val interface{}, field schema.Field, path []interface{}) (inte val = 0 } case string: - i, err := strconv.ParseFloat(v, 64) - // An error can be encountered if we had a value that can't be fit into - // a 64 bit floating point number.. - // Lets try to see if this number could be converted to int32 without losing - // information, otherwise return error. + floatVal, err := strconv.ParseFloat(v, 64) if err != nil { return nil, valueCoercionError(v) } - i32Val := int32(i) - if i == float64(i32Val) { + i32Val := int32(floatVal) + if floatVal == float64(i32Val) { val = i32Val } else { return nil, valueCoercionError(v) } - case int64: - if v > math.MaxInt32 || v < math.MinInt32 { - return nil, valueCoercionError(v) - } - case int: - // numUids are added as int, so we need special handling for that. - if v > math.MaxInt32 || v < math.MinInt32 { - return nil, valueCoercionError(v) - } case json.Number: - // We have already checked range for int32 at input validation time. - // So now just parse and check errors. - i, err := strconv.ParseFloat(v.String(), 64) + // float64 can always contain 32 bit integers without any information loss + floatVal, err := v.Float64() if err != nil { return nil, valueCoercionError(v) } - i32Val := int32(i) - if i == float64(i32Val) { - val = i32Val - } else { + i32Val := int32(floatVal) // convert the float64 value to int32 + // now if converting the int32 back to float64 results in a mismatch means we lost + // information during conversion, so return error. + if floatVal != float64(i32Val) { return nil, valueCoercionError(v) } + // otherwise, do nothing as val is already a valid number in int32 range default: return nil, valueCoercionError(v) } @@ -1824,20 +1516,15 @@ func coerceScalar(val interface{}, field schema.Field, path []interface{}) (inte } case string: i, err := strconv.ParseInt(v, 10, 64) - // An error can be encountered if we had a value that can't be fit into - // a 64 bit int or because of other parsing issues. 
if err != nil { return nil, valueCoercionError(v) } val = i case json.Number: - // To use whole 64-bit range for int64 without any coercing, - // We pass int64 values as string to dgraph and parse it as integer here - i, err := strconv.ParseInt(v.String(), 10, 64) - if err != nil { + if _, err := v.Int64(); err != nil { return nil, valueCoercionError(v) } - val = i + // do nothing, as val is already a valid number in int64 range default: return nil, valueCoercionError(v) } @@ -1851,30 +1538,34 @@ func coerceScalar(val interface{}, field schema.Field, path []interface{}) (inte } case string: i, err := strconv.ParseFloat(v, 64) - // An error can be encountered if we had a value that can't be fit into - // a 64 bit floating point number or because of other parsing issues. if err != nil { return nil, valueCoercionError(v) } val = i case json.Number: - i, err := strconv.ParseFloat(v.String(), 64) + _, err := v.Float64() if err != nil { return nil, valueCoercionError(v) } - val = i - case float64: + // do nothing, as val is already a valid number in float default: return nil, valueCoercionError(v) } case "DateTime": switch v := val.(type) { case string: - if _, err := types.ParseTime(v); err != nil { + if t, err := types.ParseTime(v); err != nil { return nil, valueCoercionError(v) + } else { + // let's make sure that we always return a string in RFC3339 format as the original + // string could have been in some other format. + val = t.Format(time.RFC3339) } case json.Number: - valFloat, _ := v.Float64() + valFloat, err := v.Float64() + if err != nil { + return nil, valueCoercionError(v) + } truncated := math.Trunc(valFloat) if truncated == valFloat { // Lets interpret int values as unix timestamp. @@ -1896,16 +1587,10 @@ func coerceScalar(val interface{}, field schema.Field, path []interface{}) (inte switch v := val.(type) { case string: // Lets check that the enum value is valid. - valid := false - for _, ev := range enumValues { - if ev == v { - valid = true - break - } - } - if !valid { + if !x.HasString(enumValues, v) { return nil, valueCoercionError(val) } + // do nothing, as val already has a valid value default: return nil, valueCoercionError(v) } @@ -1935,19 +1620,19 @@ func completeList( field schema.Field, values []interface{}) ([]byte, x.GqlErrorList) { - var buf bytes.Buffer - var errs x.GqlErrorList - comma := "" - if field.Type().ListType() == nil { - // This means a bug on our part - in rewriting, schema generation, - // or Dgraph returned something unexpected. + // This means either a bug on our part - in admin server. + // or @custom query/mutation returned something unexpected. // // Let's crush it to null so we still get something from the rest of the // query and log the error. 
- return mismatched(path, field, values) + return mismatched(path, field) } + var buf bytes.Buffer + var errs x.GqlErrorList + comma := "" + x.Check2(buf.WriteRune('[')) for i, b := range values { r, err := completeValue(append(path, i), field, b) @@ -1971,7 +1656,7 @@ func completeList( // https://graphql.github.io/graphql-spec/June2018/#sec-Errors return nil, errs } - x.Check2(buf.WriteString("null")) + x.Check2(buf.Write(schema.JsonNull)) } else { x.Check2(buf.Write(r)) } @@ -1982,70 +1667,32 @@ func completeList( return buf.Bytes(), errs } -func mismatched( - path []interface{}, - field schema.Field, - values []interface{}) ([]byte, x.GqlErrorList) { - +func mismatched(path []interface{}, field schema.Field) ([]byte, x.GqlErrorList) { glog.Errorf("completeList() called in resolving %s (Line: %v, Column: %v), "+ "but its type is %s.\n"+ "That could indicate the Dgraph schema doesn't match the GraphQL schema.", field.Name(), field.Location().Line, field.Location().Column, field.Type().Name()) - gqlErr := &x.GqlError{ - Message: schema.ErrExpectedSingleItem, - Locations: []x.Location{field.Location()}, - Path: copyPath(path), - } - val, errs := completeValue(path, field, nil) - return val, append(errs, gqlErr) -} - -func copyPath(path []interface{}) []interface{} { - result := make([]interface{}, len(path)) - copy(result, path) - return result -} - -// maxPathLength finds the max length (including list indexes) of any path in the 'query' f. -// Used to pre-allocate a path buffer of the correct size before running completeObject on -// the top level query - means that we aren't reallocating slices multiple times -// during the complete* functions. -func maxPathLength(f schema.Field) int { - childMax := 0 - for _, chld := range f.SelectionSet() { - d := maxPathLength(chld) - if d > childMax { - childMax = d - } - } - if f.Type().ListType() != nil { - // It's f: [...], so add a space for field name and - // a space for the index into the list - return 2 + childMax - } - - return 1 + childMax + return val, append(errs, field.GqlErrorf(path, schema.ErrExpectedSingleItem)) } // a httpResolver can resolve a single GraphQL field from an HTTP endpoint type httpResolver struct { *http.Client - resultCompleter ResultCompleter } type httpQueryResolver httpResolver type httpMutationResolver httpResolver // NewHTTPQueryResolver creates a resolver that can resolve GraphQL query from an HTTP endpoint -func NewHTTPQueryResolver(hc *http.Client, rc ResultCompleter) QueryResolver { - return &httpQueryResolver{hc, rc} +func NewHTTPQueryResolver(hc *http.Client) QueryResolver { + return &httpQueryResolver{hc} } // NewHTTPMutationResolver creates a resolver that resolves GraphQL mutation from an HTTP endpoint -func NewHTTPMutationResolver(hc *http.Client, rc ResultCompleter) MutationResolver { - return &httpMutationResolver{hc, rc} +func NewHTTPMutationResolver(hc *http.Client) MutationResolver { + return &httpMutationResolver{hc} } func (hr *httpResolver) Resolve(ctx context.Context, field schema.Field) *Resolved { @@ -2054,7 +1701,6 @@ func (hr *httpResolver) Resolve(ctx context.Context, field schema.Field) *Resolv defer stop() resolved := hr.rewriteAndExecute(ctx, field) - hr.resultCompleter.Complete(ctx, resolved) return resolved } @@ -2108,17 +1754,9 @@ func getBodyForLambda(ctx context.Context, field schema.Field, parents, } func (hr *httpResolver) rewriteAndExecute(ctx context.Context, field schema.Field) *Resolved { - emptyResult := func(err error) *Resolved { - return &Resolved{ - Data: 
map[string]interface{}{field.Name(): nil}, - Field: field, - Err: schema.AsGQLErrors(err), - } - } - hrc, err := field.CustomHTTPConfig() if err != nil { - return emptyResult(err) + return EmptyResult(field, err) } var body string @@ -2129,14 +1767,14 @@ func (hr *httpResolver) rewriteAndExecute(ctx context.Context, field schema.Fiel } b, err := json.Marshal(jsonTemplate) if err != nil { - return emptyResult(jsonMarshalError(err, field, *hrc.Template)) + return EmptyResult(field, jsonMarshalError(err, field, *hrc.Template)) } body = string(b) } b, status, err := makeRequest(hr.Client, hrc.Method, hrc.URL, body, hrc.ForwardHeaders) if err != nil { - return emptyResult(externalRequestError(err, field)) + return EmptyResult(field, externalRequestError(err, field)) } // this means it had body and not graphql, so just unmarshal it and return @@ -2144,19 +1782,15 @@ func (hr *httpResolver) rewriteAndExecute(ctx context.Context, field schema.Fiel var result interface{} var rerr restErr if status >= 200 && status < 300 { - if err := json.Unmarshal(b, &result); err != nil { - return emptyResult(jsonUnmarshalError(err, field)) + if err := Unmarshal(b, &result); err != nil { + return EmptyResult(field, jsonUnmarshalError(err, field)) } - } else if err := json.Unmarshal(b, &rerr); err != nil { + } else if err := Unmarshal(b, &rerr); err != nil { err = errors.Errorf("unexpected error with: %v", status) rerr.Errors = x.GqlErrorList{externalRequestError(err, field)} } - return &Resolved{ - Data: map[string]interface{}{field.Name(): result}, - Field: field, - Err: rerr.Errors, - } + return DataResult(field, map[string]interface{}{field.Name(): result}, rerr.Errors) } // we will reach here if it was a graphql request @@ -2164,21 +1798,17 @@ func (hr *httpResolver) rewriteAndExecute(ctx context.Context, field schema.Fiel Data map[string]interface{} `json:"data,omitempty"` Errors x.GqlErrorList `json:"errors,omitempty"` } - err = json.Unmarshal(b, &resp) + err = Unmarshal(b, &resp) if err != nil { gqlErr := jsonUnmarshalError(err, field) resp.Errors = append(resp.Errors, schema.AsGQLErrors(gqlErr)...) - return emptyResult(resp.Errors) + return EmptyResult(field, resp.Errors) } data, ok := resp.Data[hrc.RemoteGqlQueryName] if !ok { - return emptyResult(resp.Errors) - } - return &Resolved{ - Data: map[string]interface{}{field.Name(): data}, - Field: field, - Err: resp.Errors, + return EmptyResult(field, resp.Errors) } + return DataResult(field, map[string]interface{}{field.Name(): data}, resp.Errors) } func (h *httpQueryResolver) Resolve(ctx context.Context, query schema.Query) *Resolved { @@ -2191,14 +1821,32 @@ func (h *httpMutationResolver) Resolve(ctx context.Context, mutation schema.Muta return resolved, resolved.Err == nil || resolved.Err.Error() == "" } +// Unmarshal is like json.Unmarshal() except it uses a custom decoder which preserves number +// precision by unmarshalling them into json.Number. 
+func Unmarshal(data []byte, v interface{}) error { + decoder := json.NewDecoder(bytes.NewReader(data)) + decoder.UseNumber() + return decoder.Decode(v) +} + func EmptyResult(f schema.Field, err error) *Resolved { return &Resolved{ - Data: map[string]interface{}{f.Name(): nil}, + Data: f.NullResponse(), Field: f, Err: schema.GQLWrapLocationf(err, f.Location(), "resolving %s failed", f.Name()), } } +func DataResult(f schema.Field, data map[string]interface{}, err error) *Resolved { + b, errs := completeObject(make([]interface{}, 0, f.MaxPathLength()), []schema.Field{f}, data) + + return &Resolved{ + Data: b, + Field: f, + Err: schema.AppendGQLErrs(err, errs), + } +} + func newtimer(ctx context.Context, Duration *schema.OffsetDuration) schema.OffsetTimer { resolveStartTime, _ := ctx.Value(resolveStartTime).(time.Time) tf := schema.NewOffsetTimerFactory(resolveStartTime) diff --git a/graphql/schema/introspection.go b/graphql/schema/introspection.go index 0027ea9a9e5..a87cf251edb 100644 --- a/graphql/schema/introspection.go +++ b/graphql/schema/introspection.go @@ -90,7 +90,7 @@ func (ec *executionContext) writeStringValue(val string) { func (ec *executionContext) writeOptionalStringValue(val *string) { if val == nil { - x.Check2(ec.b.WriteString("null")) + x.Check2(ec.b.Write(JsonNull)) } else { ec.writeStringValue(*val) } @@ -211,7 +211,7 @@ func (ec *executionContext) handleEnumValue(sel ast.SelectionSet, obj *introspec if i != 0 { x.Check2(ec.b.WriteRune(',')) } - ec.writeKey(field.Name) + ec.writeKey(field.Alias) switch field.Name { case Typename: ec.writeStringValue("__EnumValue") @@ -293,7 +293,7 @@ func (ec *executionContext) handleSchema(sel ast.SelectionSet, obj *introspectio if i != 0 { x.Check2(ec.b.WriteRune(',')) } - ec.writeKey(field.Name) + ec.writeKey(field.Alias) switch field.Name { case Typename: ec.writeStringValue("__Schema") @@ -390,7 +390,7 @@ func (ec *executionContext) marshalIntrospectionTypeSlice(sel ast.SelectionSet, func (ec *executionContext) marshalIntrospectionType(sel ast.SelectionSet, v *introspection.Type) { if v == nil { // TODO - This should be an error as this field is mandatory. 
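// --- Illustrative sketch, not part of the patch above -----------------------
// Why the custom Unmarshal above switches the decoder to UseNumber(): plain
// json.Unmarshal decodes every JSON number into float64, which silently loses
// precision for integers above 2^53, while decoding into json.Number keeps
// the original literal so the coercion code can call Int64()/Float64() later.
// Runnable on its own; the "uid" key is just example data.
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
)

func main() {
    data := []byte(`{"uid": 9007199254740993}`) // 2^53 + 1

    var lossy map[string]interface{}
    _ = json.Unmarshal(data, &lossy)
    fmt.Printf("%v\n", lossy["uid"]) // 9.007199254740992e+15 – off by one

    var exact map[string]interface{}
    dec := json.NewDecoder(bytes.NewReader(data))
    dec.UseNumber()
    _ = dec.Decode(&exact)
    fmt.Printf("%v\n", exact["uid"]) // 9007199254740993 – kept as json.Number
}
// -----------------------------------------------------------------------------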
- x.Check2(ec.b.WriteString("null")) + x.Check2(ec.b.Write(JsonNull)) return } ec.handleType(sel, v) @@ -399,7 +399,7 @@ func (ec *executionContext) marshalIntrospectionType(sel ast.SelectionSet, v *in func (ec *executionContext) marshalOptionalEnumValueSlice(sel ast.SelectionSet, v []introspection.EnumValue) { if v == nil { - x.Check2(ec.b.WriteString("null")) + x.Check2(ec.b.Write(JsonNull)) return } x.Check2(ec.b.WriteRune('[')) @@ -415,7 +415,7 @@ func (ec *executionContext) marshalOptionalEnumValueSlice(sel ast.SelectionSet, func (ec *executionContext) marshalIntrospectionFieldSlice(sel ast.SelectionSet, v []introspection.Field) { if v == nil { - x.Check2(ec.b.WriteString("null")) + x.Check2(ec.b.Write(JsonNull)) return } x.Check2(ec.b.WriteRune('[')) @@ -447,7 +447,7 @@ func (ec *executionContext) marshalOptionalInputValueSlice(sel ast.SelectionSet, func (ec *executionContext) marshalOptionalItypeSlice(sel ast.SelectionSet, v []introspection.Type) { if v == nil { - x.Check2(ec.b.WriteString("null")) + x.Check2(ec.b.Write(JsonNull)) return } @@ -463,7 +463,7 @@ func (ec *executionContext) marshalOptionalItypeSlice(sel ast.SelectionSet, func (ec *executionContext) marshalType(sel ast.SelectionSet, v *introspection.Type) { if v == nil { - x.Check2(ec.b.WriteString("null")) + x.Check2(ec.b.Write(JsonNull)) return } ec.handleType(sel, v) diff --git a/graphql/schema/response.go b/graphql/schema/response.go index 52ca20158f4..c3f87aea7b7 100644 --- a/graphql/schema/response.go +++ b/graphql/schema/response.go @@ -54,6 +54,13 @@ const ( "GraphQL error propagation triggered." ) +var ( + // JsonNull are the bytes to represent null in JSON. + JsonNull = []byte("null") + // JsonEmptyList are the bytes to represent an empty list in JSON. + JsonEmptyList = []byte("[]") +) + // GraphQL spec on response is here: // https://graphql.github.io/graphql-spec/June2018/#sec-Response @@ -139,7 +146,7 @@ func (r *Response) AddData(p []byte) { func (r *Response) SetDataNull() { r.dataIsNull = true r.Data.Reset() - x.Check2(r.Data.WriteString("null")) + x.Check2(r.Data.Write(JsonNull)) } // MergeExtensions merges the extensions given in ext to r. @@ -182,7 +189,7 @@ func (r *Response) Output() interface{} { Data json.RawMessage `json:"data,omitempty"` }{ Errors: []byte(`[{"message": "Internal error - no response to write."}]`), - Data: []byte("null"), + Data: JsonNull, } } diff --git a/graphql/schema/wrappers.go b/graphql/schema/wrappers.go index ce5a7841c0f..942d35ac261 100644 --- a/graphql/schema/wrappers.go +++ b/graphql/schema/wrappers.go @@ -125,6 +125,12 @@ type Field interface { SetArgTo(arg string, val interface{}) Skip() bool Include() bool + // SkipField tells whether to skip this field during completion or not based on: + // * @skip + // * @include + // * __typename: used for skipping fields in abstract types + // * seenField: used for skipping when the field has already been seen at the current level + SkipField(dgraphTypes []string, seenField map[string]bool) bool Cascade() []string HasCustomDirective() (bool, map[string]FieldDefinition) HasLambdaDirective() bool @@ -133,10 +139,10 @@ type Field interface { Location() x.Location DgraphPredicate() string Operation() Operation - // AbstractType tells us whether this field represents a GraphQL Interface. + // AbstractType tells us whether this field represents a GraphQL Interface/Union. 
AbstractType() bool - IncludeInterfaceField(types []interface{}) bool - TypeName(dgraphTypes []interface{}) string + IncludeAbstractField(types []string) bool + TypeName(dgraphTypes []string) string GetObjectName() string IsAuthQuery() bool CustomHTTPConfig() (FieldHTTPConfig, error) @@ -146,6 +152,24 @@ type Field interface { DgraphPredicateForAggregateField() string IsAggregateField() bool GqlErrorf(path []interface{}, message string, args ...interface{}) *x.GqlError + // MaxPathLength finds the max length (including list indexes) of any path in the 'query' f. + // Used to pre-allocate a path buffer of the correct size before running completeObject on + // the top level query - means that we aren't reallocating slices multiple times + // during the complete* functions. + MaxPathLength() int + // NullValue returns the appropriate null bytes to be written as value in the JSON response for + // this field. + // * If this field is a list field then it returns []byte("[]"). + // * If it is nullable, it returns []byte("null"). + // * Otherwise, this field is non-nullable and so it will return a nil slice to indicate that. + NullValue() []byte + // NullResponse returns the bytes representing a JSON object to be used for setting the Data + // field of a Resolved, when this field resolves to a NullValue. + // * If this field is a list field then it returns []byte(`{"fieldAlias":[]}`). + // * If it is nullable, it returns []byte(`{"fieldAlias":null}`). + // * Otherwise, this field is non-nullable and so it will return a nil slice to indicate that. + // This is useful only for top-level fields like a query or mutation. + NullResponse() []byte } // A Mutation is a field (from the schema's Mutation type) from an Operation @@ -820,6 +844,68 @@ func (f *field) GqlErrorf(path []interface{}, message string, args ...interface{ } } +func (f *field) MaxPathLength() int { + childMax := 0 + for _, child := range f.SelectionSet() { + d := child.MaxPathLength() + if d > childMax { + childMax = d + } + } + if f.Type().ListType() != nil { + // It's f: [...], so add a space for field name and + // a space for the index into the list + return 2 + childMax + } + + return 1 + childMax +} + +func (f *field) NullValue() []byte { + typ := f.Type() + if typ.ListType() != nil { + // We could choose to set this to null. This is our decision, not + // anything required by the GraphQL spec. + // + // However, if we query, for example, for a person's friends with + // some restrictions, and there aren't any, is that really a case to + // set this at null and error if the list is required? What + // about if a person has just been added and doesn't have any friends? + // Doesn't seem right to add null and cause error propagation. + // + // Seems best if we pick [], rather than null, as the list value if + // there's nothing in the Dgraph result. + return JsonEmptyList + } + + if typ.Nullable() { + return JsonNull + } + + // this is a non-nullable field, so return a nil slice to indicate that. + return nil +} + +func (f *field) NullResponse() []byte { + val := f.NullValue() + if val == nil { + // this is a non-nullable field, so return a nil slice to indicate that. + return nil + } + + key := []byte(f.ResponseName()) + + buf := make([]byte, 0, 5+len(key)+len(val)) // 5 = 2 + 2 + 1 + buf = append(buf, '{', '"') + buf = append(buf, key...) + buf = append(buf, '"', ':') + buf = append(buf, val...) 
+ buf = append(buf, '}') + + // finally return a JSON like: {"fieldAlias":null} + return buf +} + func (f *field) Arguments() map[string]interface{} { if f.arguments == nil { // Compute and cache the map first time this function is called for a field. @@ -864,6 +950,25 @@ func (f *field) Include() bool { return dir.ArgumentMap(f.op.vars)["if"].(bool) } +func (f *field) SkipField(dgraphTypes []string, seenField map[string]bool) bool { + if f.Skip() || !f.Include() { + return true + } + // If typ is an abstract type, and typename is a concrete type, then we ignore fields which + // aren't part of that concrete type. This would happen when multiple fragments (belonging + // to different concrete types) are requested within a query for an abstract type. + if len(dgraphTypes) > 0 && !f.IncludeAbstractField(dgraphTypes) { + return true + } + // if the field has already been seen at the current level, then we need to skip it. + // Otherwise, mark it seen. + if seenField[f.ResponseName()] { + return true + } + seenField[f.ResponseName()] = true + return false +} + func (f *field) Cascade() []string { dir := f.field.Directives.ForName(cascadeDirective) if dir == nil { @@ -1233,14 +1338,9 @@ func (f *field) DgraphPredicate() string { return f.op.inSchema.dgraphPredicate[f.field.ObjectDefinition.Name][f.Name()] } -func (f *field) TypeName(dgraphTypes []interface{}) string { +func (f *field) TypeName(dgraphTypes []string) string { for _, typ := range dgraphTypes { - styp, ok := typ.(string) - if !ok { - continue - } - - for _, origTyp := range f.op.inSchema.typeNameAst[styp] { + for _, origTyp := range f.op.inSchema.typeNameAst[typ] { if origTyp.Kind != ast.Object { continue } @@ -1251,15 +1351,11 @@ func (f *field) TypeName(dgraphTypes []interface{}) string { return f.GetObjectName() } -func (f *field) IncludeInterfaceField(dgraphTypes []interface{}) bool { +func (f *field) IncludeAbstractField(dgraphTypes []string) bool { // Given a list of dgraph types, we query the schema and find the one which is an ast.Object // and not an Interface object. for _, typ := range dgraphTypes { - styp, ok := typ.(string) - if !ok { - continue - } - for _, origTyp := range f.op.inSchema.typeNameAst[styp] { + for _, origTyp := range f.op.inSchema.typeNameAst[typ] { if origTyp.Kind == ast.Object { // For fields coming from fragments inside an abstract type, there are two cases: // * If the field is from an interface implemented by this object, and was fetched @@ -1300,6 +1396,18 @@ func (q *query) GqlErrorf(path []interface{}, message string, args ...interface{ return (*field)(q).GqlErrorf(path, message, args...) } +func (q *query) MaxPathLength() int { + return (*field)(q).MaxPathLength() +} + +func (q *query) NullValue() []byte { + return (*field)(q).NullValue() +} + +func (q *query) NullResponse() []byte { + return (*field)(q).NullResponse() +} + func (q *query) AuthFor(typ Type, jwtVars map[string]interface{}) Query { // copy the template, so that multiple queries can run rewriting for the rule. 
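// --- Illustrative sketch, not part of the patch above -----------------------
// A stripped-down version of the NullValue()/NullResponse() decision shown
// above, with the field modelled as a couple of booleans instead of
// schema.Field. The struct and its fields are hypothetical; only the
// byte-building mirrors the real methods: [] for list fields, null for
// nullable ones, and a nil slice for non-nullable fields so the caller knows
// it has to propagate the null upwards instead of writing anything.
package main

import "fmt"

type sketchField struct {
    alias    string
    isList   bool
    nullable bool
}

func (f sketchField) nullValue() []byte {
    switch {
    case f.isList:
        return []byte("[]")
    case f.nullable:
        return []byte("null")
    default:
        return nil
    }
}

// nullResponse wraps the null value into a top-level {"alias":...} object,
// which is what a query/mutation resolver returns when it has no data.
func (f sketchField) nullResponse() []byte {
    val := f.nullValue()
    if val == nil {
        return nil
    }
    buf := make([]byte, 0, 5+len(f.alias)+len(val)) // 5 = { " " : }
    buf = append(buf, '{', '"')
    buf = append(buf, f.alias...)
    buf = append(buf, '"', ':')
    buf = append(buf, val...)
    buf = append(buf, '}')
    return buf
}

func main() {
    fmt.Println(string(sketchField{alias: "queryUser", isList: true}.nullResponse())) // {"queryUser":[]}
    fmt.Println(string(sketchField{alias: "getUser", nullable: true}.nullResponse())) // {"getUser":null}
    fmt.Println(sketchField{alias: "addUser"}.nullResponse() == nil)                  // true
}
// -----------------------------------------------------------------------------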
return &query{ @@ -1353,6 +1461,10 @@ func (q *query) Include() bool { return true } +func (q *query) SkipField(dgraphTypes []string, seenField map[string]bool) bool { + return (*field)(q).SkipField(dgraphTypes, seenField) +} + func (q *query) Cascade() []string { return (*field)(q).Cascade() } @@ -1548,12 +1660,12 @@ func (q *query) AbstractType() bool { return (*field)(q).AbstractType() } -func (q *query) TypeName(dgraphTypes []interface{}) string { +func (q *query) TypeName(dgraphTypes []string) string { return (*field)(q).TypeName(dgraphTypes) } -func (q *query) IncludeInterfaceField(dgraphTypes []interface{}) bool { - return (*field)(q).IncludeInterfaceField(dgraphTypes) +func (q *query) IncludeAbstractField(dgraphTypes []string) bool { + return (*field)(q).IncludeAbstractField(dgraphTypes) } func (m *mutation) Name() string { @@ -1592,6 +1704,10 @@ func (m *mutation) Include() bool { return true } +func (m *mutation) SkipField(dgraphTypes []string, seenField map[string]bool) bool { + return (*field)(m).SkipField(dgraphTypes, seenField) +} + func (m *mutation) Cascade() []string { return (*field)(m).Cascade() } @@ -1706,12 +1822,12 @@ func (m *mutation) DgraphPredicate() string { return (*field)(m).DgraphPredicate() } -func (m *mutation) TypeName(dgraphTypes []interface{}) string { +func (m *mutation) TypeName(dgraphTypes []string) string { return (*field)(m).TypeName(dgraphTypes) } -func (m *mutation) IncludeInterfaceField(dgraphTypes []interface{}) bool { - return (*field)(m).IncludeInterfaceField(dgraphTypes) +func (m *mutation) IncludeAbstractField(dgraphTypes []string) bool { + return (*field)(m).IncludeAbstractField(dgraphTypes) } func (m *mutation) IsAuthQuery() bool { @@ -1726,6 +1842,18 @@ func (m *mutation) GqlErrorf(path []interface{}, message string, args ...interfa return (*field)(m).GqlErrorf(path, message, args...) } +func (m *mutation) MaxPathLength() int { + return (*field)(m).MaxPathLength() +} + +func (m *mutation) NullValue() []byte { + return (*field)(m).NullValue() +} + +func (m *mutation) NullResponse() []byte { + return (*field)(m).NullResponse() +} + func (t *astType) AuthRules() *TypeAuth { return t.inSchema.authRules[t.DgraphName()] } diff --git a/query/outputnode.go b/query/outputnode.go index afec47925e4..1069200e8d8 100644 --- a/query/outputnode.go +++ b/query/outputnode.go @@ -1098,7 +1098,8 @@ func (sg *SubGraph) toFastJSON(l *Latency, field gqlSchema.Field) ([]byte, error buf: &bufw, dgraphTypeAttrId: enc.idForAttr("dgraph.type"), } - if !enc.encodeGraphQL(gqlEncCtx, n, true, []gqlSchema.Field{field}, nil, nil) { + if !enc.encodeGraphQL(gqlEncCtx, n, true, []gqlSchema.Field{field}, nil, + make([]interface{}, 0, field.MaxPathLength())) { // if enc.encodeGraphQL() didn't finish successfully here, that means we need to send // data as null in the GraphQL response like this: // { diff --git a/query/outputnode_graphql.go b/query/outputnode_graphql.go index fbc38a434b4..f2a860c0a3a 100644 --- a/query/outputnode_graphql.go +++ b/query/outputnode_graphql.go @@ -20,15 +20,12 @@ import ( "bytes" "encoding/json" "fmt" + "strconv" gqlSchema "github.com/dgraph-io/dgraph/graphql/schema" "github.com/dgraph-io/dgraph/x" ) -var ( - jsonNull = []byte("null") -) - type graphQLEncodingCtx struct { // buf is the buffer which stores the encoded GraphQL response. 
buf *bytes.Buffer @@ -52,10 +49,42 @@ func writeKeyGraphQL(field gqlSchema.Field, out *bytes.Buffer) { x.Check2(out.WriteString(`":`)) } +func cantCoerceScalar(val []byte, field gqlSchema.Field) bool { + switch field.Type().Name() { + case "Int": + // Although GraphQL layer would have input coercion for Int, + // we still need to do this as there can be cases like schema migration when Int64 was + // changed to Int, or if someone was using DQL mutations but GraphQL queries. The GraphQL + // layer must always honor the spec. + // valToBytes() uses []byte(strconv.FormatInt(num, 10)) to convert int values to byte slice. + // so, we should do the reverse, parse the string back to int and check that it fits in the + // range of int32. + // TODO: think if we can do this check in a better way without parsing the bytes. + if _, err := strconv.ParseInt(string(val), 10, 32); err != nil { + return true + } + case "String", "ID", "Boolean", "Int64", "Float", "DateTime": + // do nothing, as for these types the GraphQL schema is same as the dgraph schema. + // Hence, the value coming in from fastJson node should already be in the correct form. + // So, no need to coerce it. + default: + enumValues := field.EnumValues() + // At this point we should only get fields which are of ENUM type, so we can return + // an error if we don't get any enum values. + if len(enumValues) == 0 { + return true + } + // Lets check that the enum value is valid. + strVal := string(val) + strVal = strVal[1 : len(strVal)-1] // remove `"` from beginning and end + if !x.HasString(enumValues, strVal) { + return true + } + } + return false +} + // TODO: -// * Scalar coercion -// * Enums -// * Password queries // * (cleanup) Cleanup code from resolve pkg // * (cleanup) make const args like *bytes.Buffer the first arg in funcs as a best practice. // * (cleanup) refactor this func as `func (ctx *graphQLEncodingCtx) encode(enc *encoder, ...)`. @@ -99,9 +128,19 @@ func (enc *encoder) encodeGraphQL(ctx *graphQLEncodingCtx, fj fastJsonNode, fjIs return false } } else { + // we got a scalar + // check coercion rules to see if it matches the GraphQL spec requirements. + if cantCoerceScalar(val, parentField) { + ctx.gqlErrs = append(ctx.gqlErrs, parentField.GqlErrorf(parentPath, + "Error coercing value '%s' for field '%s' to type %s.", + string(val), parentField.Name(), parentField.Type().Name())) + // if it can't be coerced, return false so that the caller can appropriately + // handle null writing + return false + } x.Check2(ctx.buf.Write(val)) } - // we have successfully written the scalar value, lets return true to indicate that this + // we have successfully written the value, lets return true to indicate that this // call to encodeGraphQL() was successful. return true } @@ -120,7 +159,7 @@ func (enc *encoder) encodeGraphQL(ctx *graphQLEncodingCtx, fj fastJsonNode, fjIs // if GraphQL layer requested dgraph.type predicate, then it would always be the first child in // the response as it is always written first in DQL query. So, if we get data for dgraph.type // predicate then just save it in dgraphTypes slice, no need to write it to JSON yet. 
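// --- Illustrative sketch, not part of the patch above -----------------------
// cantCoerceScalar above works on the already-JSON-encoded value bytes coming
// out of a fastJson node. A simplified stand-alone version (function name and
// inputs are hypothetical): Int must still fit in 32 bits even though Dgraph
// stores it as int64, and an enum literal, which arrives as a quoted JSON
// string, must be one of the schema's enum values.
package main

import (
    "fmt"
    "strconv"
    "strings"
)

func cantCoerce(val []byte, gqlType string, enumValues []string) bool {
    switch gqlType {
    case "Int":
        // The bytes are a plain decimal literal; parsing with bitSize 32
        // rejects anything outside the int32 range required by the spec.
        _, err := strconv.ParseInt(string(val), 10, 32)
        return err != nil
    case "String", "ID", "Boolean", "Int64", "Float", "DateTime":
        return false // stored form already matches the GraphQL type
    default:
        // Enum values arrive as a quoted JSON string, e.g. `"DIRECTOR"`.
        s := strings.Trim(string(val), `"`)
        for _, ev := range enumValues {
            if ev == s {
                return false
            }
        }
        return true
    }
}

func main() {
    fmt.Println(cantCoerce([]byte("2147483648"), "Int", nil))                   // true: overflows int32
    fmt.Println(cantCoerce([]byte(`"DIRECTOR"`), "Role", []string{"DIRECTOR"})) // false: valid enum
}
// -----------------------------------------------------------------------------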
- var dgraphTypes []interface{} + var dgraphTypes []string for enc.getAttr(child) == ctx.dgraphTypeAttrId { val, err := enc.getScalarVal(child) // val is a quoted string like: "Human" if err != nil { @@ -182,7 +221,7 @@ func (enc *encoder) encodeGraphQL(ctx *graphQLEncodingCtx, fj fastJsonNode, fjIs if cnt == 1 { // we need to check if the field should be skipped only when it is encountered for // the first time - if skipField(curSelection, dgraphTypes, seenField) { + if curSelection.SkipField(dgraphTypes, seenField) { cnt = 0 // Reset the count, // indicating that we need to write the JSON key in next iteration. i++ @@ -286,8 +325,8 @@ func (enc *encoder) encodeGraphQL(ctx *graphQLEncodingCtx, fj fastJsonNode, fjIs // which may trigger the object to turn out to be null. if !enc.encodeGraphQL(ctx, cur, false, curSelection.SelectionSet(), curSelection, append(parentPath, curSelection.ResponseName(), cnt-1)) { - // Unlike the choice in writeGraphQLNull(), where we turn missing - // lists into [], the spec explicitly calls out: + // Unlike the choice in curSelection.NullValue(), where we turn missing + // list fields into [], the spec explicitly calls out: // "If a List type wraps a Non-Null type, and one of the // elements of that list resolves to null, then the entire list // must resolve to null." @@ -303,10 +342,10 @@ func (enc *encoder) encodeGraphQL(ctx *graphQLEncodingCtx, fj fastJsonNode, fjIs typ := curSelection.Type() if typ.ListType().Nullable() { ctx.buf.Truncate(itemPos) - x.Check2(ctx.buf.Write(jsonNull)) + x.Check2(ctx.buf.Write(gqlSchema.JsonNull)) } else if typ.Nullable() { ctx.buf.Truncate(keyEndPos) - x.Check2(ctx.buf.Write(jsonNull)) + x.Check2(ctx.buf.Write(gqlSchema.JsonNull)) // set nullWritten to true so we don't write closing ] for this list nullWritten = true // skip all data for the current list selection @@ -408,7 +447,7 @@ func (enc *encoder) encodeGraphQL(ctx *graphQLEncodingCtx, fj fastJsonNode, fjIs for i < len(childSelectionSet) { curSelection = childSelectionSet[i] - if skipField(curSelection, dgraphTypes, seenField) { + if curSelection.SkipField(dgraphTypes, seenField) { i++ // if this is the last field and shouldn't be included, // then need to remove comma from the buffer if one was present. @@ -448,28 +487,6 @@ func (enc *encoder) encodeGraphQL(ctx *graphQLEncodingCtx, fj fastJsonNode, fjIs return true } -func skipField(f gqlSchema.Field, dgraphTypes []interface{}, seenField map[string]bool) bool { - if f.Skip() || !f.Include() { - return true - } - // If typ is an interface, and dgraphTypes contains another type, then we ignore - // fields which don't start with that type. This would happen when multiple - // fragments (belonging to different types) are requested within a query for an interface. - - // If the dgraphPredicate doesn't start with the typ.Name(), then this field belongs to - // a concrete type, lets check that it has inputType as the prefix, otherwise skip it. - if len(dgraphTypes) > 0 && !f.IncludeInterfaceField(dgraphTypes) { - return true - } - // if the field has already been seen at the current level, then we need to skip it. - // Otherwise, mark it seen. 
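// --- Illustrative sketch, not part of the patch above -----------------------
// The buffer-truncation logic above implements the spec rule quoted in the
// comment: a null element inside [T!] collapses the whole list to null, a null
// element inside [T] only nulls that element, and for [T!]! nothing can be
// written here so the null keeps propagating to the parent. A toy version over
// plain strings; all names are hypothetical.
package main

import (
    "fmt"
    "strings"
)

// completeListSketch renders items, where ok=false marks an element that
// resolved to null. It returns the JSON for the list and whether the caller
// has to propagate a null further up (the [T!]! case).
func completeListSketch(items []bool, innerNullable, listNullable bool) (string, bool) {
    out := make([]string, 0, len(items))
    for _, ok := range items {
        switch {
        case ok:
            out = append(out, `"item"`)
        case innerNullable: // [T]: just this element becomes null
            out = append(out, "null")
        case listNullable: // [T!]: the whole list collapses to null
            return "null", false
        default: // [T!]!: parent has to go null
            return "", true
        }
    }
    return "[" + strings.Join(out, ",") + "]", false
}

func main() {
    s, _ := completeListSketch([]bool{true, false, true}, true, true)
    fmt.Println(s) // ["item",null,"item"]
    s, _ = completeListSketch([]bool{true, false, true}, false, true)
    fmt.Println(s) // null
    _, propagate := completeListSketch([]bool{true, false, true}, false, false)
    fmt.Println(propagate) // true
}
// -----------------------------------------------------------------------------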
- if seenField[f.ResponseName()] { - return true - } - seenField[f.ResponseName()] = true - return false -} - func checkAndStripComma(buf *bytes.Buffer) { b := buf.Bytes() if len(b) > 0 && b[len(b)-1] == ',' { @@ -477,32 +494,17 @@ func checkAndStripComma(buf *bytes.Buffer) { } } -func getTypename(f gqlSchema.Field, dgraphTypes []interface{}) []byte { - // TODO (cleanup): refactor the TypeName method to accept []string +func getTypename(f gqlSchema.Field, dgraphTypes []string) []byte { return []byte(`"` + f.TypeName(dgraphTypes) + `"`) } func writeGraphQLNull(f gqlSchema.Field, buf *bytes.Buffer, keyEndPos int) bool { - buf.Truncate(keyEndPos) // truncate to make sure we write null correctly - if f.Type().ListType() != nil { - // We could choose to set this to null. This is our decision, not - // anything required by the GraphQL spec. - // - // However, if we query, for example, for a person's friends with - // some restrictions, and there aren't any, is that really a case to - // set this at null and error if the list is required? What - // about if a person has just been added and doesn't have any friends? - // Doesn't seem right to add null and cause error propagation. - // - // Seems best if we pick [], rather than null, as the list value if - // there's nothing in the Dgraph result. - x.Check2(buf.Write([]byte("[]"))) - } else if f.Type().Nullable() { - x.Check2(buf.Write(jsonNull)) - } else { - return false + if b := f.NullValue(); b != nil { + buf.Truncate(keyEndPos) // truncate to make sure we write null correctly + x.Check2(buf.Write(b)) + return true } - return true + return false } // completeRootAggregateQuery builds GraphQL JSON for aggregate queries at root. @@ -542,7 +544,7 @@ func writeGraphQLNull(f gqlSchema.Field, buf *bytes.Buffer, keyEndPos int) bool func completeRootAggregateQuery(enc *encoder, ctx *graphQLEncodingCtx, fj fastJsonNode, query gqlSchema.Field, qryPath []interface{}) fastJsonNode { if enc.children(fj) == nil { - x.Check2(ctx.buf.Write(jsonNull)) + x.Check2(ctx.buf.Write(gqlSchema.JsonNull)) return fj.next } @@ -563,7 +565,7 @@ func completeRootAggregateQuery(enc *encoder, ctx *graphQLEncodingCtx, fj fastJs ctx.gqlErrs = append(ctx.gqlErrs, f.GqlErrorf(append(qryPath, f.ResponseName()), err.Error())) // all aggregate properties are nullable, so no special checks are required - val = jsonNull + val = gqlSchema.JsonNull } fj = fj.next } @@ -627,11 +629,11 @@ func completeAggregateChildren(enc *encoder, ctx *graphQLEncodingCtx, fj fastJso ctx.gqlErrs = append(ctx.gqlErrs, f.GqlErrorf(append(fieldPath, f.ResponseName()), err.Error())) // all aggregate properties are nullable, so no special checks are required - val = jsonNull + val = gqlSchema.JsonNull } fj = fj.next } else { - val = jsonNull + val = gqlSchema.JsonNull } x.Check2(ctx.buf.Write(val)) comma = ","
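// --- Illustrative sketch, not part of the patch above -----------------------
// The free skipField() removed above now lives on the Field interface as
// SkipField(). A toy version of the same three checks, with the field reduced
// to plain data (all names hypothetical): @skip/@include directives, fragment
// filtering against the node's concrete dgraph.type, and de-duplication of a
// response name already written at the current level.
package main

import "fmt"

type selSketch struct {
    responseName string
    skip         bool     // @skip(if: true)
    include      bool     // @include(if: ...)
    onTypes      []string // concrete types this fragment field applies to; empty = all
}

func (s selSketch) skipField(dgraphTypes []string, seen map[string]bool) bool {
    if s.skip || !s.include {
        return true
    }
    // Fragments on an abstract type: drop fields that belong to a different
    // concrete type than the one Dgraph returned for this node.
    if len(dgraphTypes) > 0 && len(s.onTypes) > 0 && !hasAny(s.onTypes, dgraphTypes) {
        return true
    }
    // Same response name already written at this level: skip, else mark seen.
    if seen[s.responseName] {
        return true
    }
    seen[s.responseName] = true
    return false
}

func hasAny(want, have []string) bool {
    for _, w := range want {
        for _, h := range have {
            if w == h {
                return true
            }
        }
    }
    return false
}

func main() {
    seen := map[string]bool{}
    f := selSketch{responseName: "name", include: true, onTypes: []string{"Human"}}
    fmt.Println(f.skipField([]string{"Human"}, seen)) // false: first time, type matches
    fmt.Println(f.skipField([]string{"Human"}, seen)) // true: duplicate at this level
    fmt.Println(f.skipField([]string{"Droid"}, seen)) // true: fragment is for Human
}
// -----------------------------------------------------------------------------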