Skip to content

Commit

Permalink
add @unique support in schema
Browse files Browse the repository at this point in the history
  • Loading branch information
shivaji-kharse committed May 22, 2023
1 parent 595b72d commit 3475021
Show file tree
Hide file tree
Showing 4 changed files with 43 additions and 4 deletions.
8 changes: 8 additions & 0 deletions protos/pb/pb.pb.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 4 additions & 1 deletion schema/parse.go
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,9 @@ func parseDirective(it *lex.ItemIterator, schema *pb.SchemaUpdate, t types.TypeI
schema.Count = true
case "upsert":
schema.Upsert = true
case "unique":
schema.Directive = pb.SchemaUpdate_INDEX
schema.Unique = true
case "noconflict":
schema.NoConflict = true
case "lang":
Expand Down Expand Up @@ -261,7 +264,7 @@ func resolveTokenizers(updates []*pb.SchemaUpdate) error {
continue
}

if len(schema.Tokenizer) == 0 && schema.Directive == pb.SchemaUpdate_INDEX {
if len(schema.Tokenizer) == 0 && schema.Directive == pb.SchemaUpdate_INDEX && !schema.Unique {
return errors.Errorf("Require type of tokenizer for pred: %s of type: %s for indexing.",
schema.Predicate, typ.Name())
} else if len(schema.Tokenizer) > 0 && schema.Directive != pb.SchemaUpdate_INDEX {
Expand Down
30 changes: 28 additions & 2 deletions worker/mutation.go
Original file line number Diff line number Diff line change
Expand Up @@ -399,11 +399,11 @@ func checkSchema(s *pb.SchemaUpdate) error {
x.ParseAttr(s.Predicate))
}

if s.Directive == pb.SchemaUpdate_INDEX && len(s.Tokenizer) == 0 {
if s.Directive == pb.SchemaUpdate_INDEX && len(s.Tokenizer) == 0 && !s.Unique {
return errors.Errorf("Tokenizer must be specified while indexing a predicate: %+v", s)
}

if len(s.Tokenizer) > 0 && s.Directive != pb.SchemaUpdate_INDEX {
if len(s.Tokenizer) > 0 && s.Directive != pb.SchemaUpdate_INDEX && !s.Unique {
return errors.Errorf("Directive must be SchemaUpdate_INDEX when a tokenizer is specified")
}

Expand All @@ -424,6 +424,32 @@ func checkSchema(s *pb.SchemaUpdate) error {
x.ParseAttr(s.Predicate))
}

if s.Unique {
if s.ValueType == pb.Posting_STRING {
checkTerm := func() bool {
for _, value := range s.Tokenizer {
if value == "term" {
return true
}
}
return false
}
if len(s.Tokenizer) == 0 {
s.Tokenizer = append(s.Tokenizer, "exact")
} else if checkTerm() {
return errors.New("term index is not allowed")
}
}
if s.ValueType == pb.Posting_INT {
if len(s.Tokenizer) == 0 {
s.Tokenizer = append(s.Tokenizer, "int")
}
}
if s.ValueType == pb.Posting_FLOAT {
if len(s.Tokenizer) == 0 {
s.Tokenizer = append(s.Tokenizer, "float")
}
}
}

t, err := schema.State().TypeOf(s.Predicate)
if err != nil {
// No schema previously defined, so no need to do checks about schema conversions.
Expand Down
4 changes: 3 additions & 1 deletion worker/schema.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ func getSchema(ctx context.Context, s *pb.SchemaRequest) (*pb.SchemaResult, erro
if len(s.Fields) > 0 {
fields = s.Fields
} else {
fields = []string{"type", "index", "tokenizer", "reverse", "count", "list", "upsert",
fields = []string{"type", "index", "tokenizer", "reverse", "count", "list", "upsert", "unique",
"lang", "noconflict"}
}

Expand Down Expand Up @@ -110,6 +110,8 @@ func populateSchema(attr string, fields []string) *pb.SchemaNode {
schemaNode.List = pred.GetList()
case "upsert":
schemaNode.Upsert = pred.GetUpsert()
case "unique":
schemaNode.Unique = pred.GetUnique()
case "lang":
schemaNode.Lang = pred.GetLang()
case "noconflict":
Expand Down

0 comments on commit 3475021

Please sign in to comment.