Skip to content

Commit

Permalink
sql: support a limited form of arrays
Browse files Browse the repository at this point in the history
This commit introduces support for an ARRAY column type as described in
the RFC (cockroachdb#16172) with the following limitations:

* NULL values in arrays are currently not supported
* Collated strings as array contents are currently not supported
* No additional operations on arrays have been implemented
* Arrays are only 1-dimensional

This commit also disallows using arrays as primary keys or as an indexed
column; however, it's not clear to me yet whether there are other situations
in which a value could become key-encoded. I think more testing is in
order, but I wanted to get some eyes on this.
  • Loading branch information
Justin Jaffray committed Jul 25, 2017
1 parent 1a85572 commit 40b86c7
Show file tree
Hide file tree
Showing 34 changed files with 3,713 additions and 3,002 deletions.
26 changes: 23 additions & 3 deletions docs/RFCS/array_encoding.md
Original file line number Diff line number Diff line change
Expand Up @@ -61,8 +61,26 @@ We also do not support the Postgres feature of lower bounds on dimensions
other than 1.

Like Postgres, we do not support nested array types.
This is somewhat obscured by Postgres' use of syntax, which appears to
describe nested arrays, but actually describes multidimensional arrays:
```sql
SELECT ARRAY[ARRAY[1,2,3], ARRAY[4,5,6]];
```
It's worth noting that this is not describing an array of arrays.
Rather, it's describing a two-dimensional array. This is an
important distinction because it means that Postgres can't support
subarrays of different lengths:
```sql
SELECT ARRAY[ARRAY[1,2,3], ARRAY[4,5]];
```
is invalid.
For multidimensionality, we will only support this alternate syntax, which Postgres treats
as equivalent:
```sql
SELECT ARRAY[[1,2,3],[4,5,6]];
```

Arrays are limited to at most 2^32 elements, although it's likely that the 64MB
Arrays are limited to at most 2^31-1 elements, although it's likely that the 64MB
row size limit will be the proximal limiting factor to large arrays.

## Schema Definition
Expand Down Expand Up @@ -100,8 +118,10 @@ implementation.
Array values will be encoded using a format very similar to the one used in
Postgres. The format is:

* Array type tag
* Element type tag
* Byte type tag
* Length in bytes
* Length of the array, in bytes
* Value type tag
* A byte, encoding:
* The number of dimensions in the array as the high 4 bits
* A 4-bit flag bitmap, having all but the lowest bit reserved, with the lowest bit representing whether we have a NULL bitmap.
Expand Down
8 changes: 4 additions & 4 deletions pkg/internal/rsg/rsg.go
Original file line number Diff line number Diff line change
Expand Up @@ -241,9 +241,7 @@ func (r *RSG) GenerateRandomArg(typ parser.Type) string {
case parser.TypeUUID:
u := uuid.MakeV4()
v = fmt.Sprintf(`'%s'`, u)
case parser.TypeIntArray,
parser.TypeStringArray,
parser.TypeOid,
case parser.TypeOid,
parser.TypeRegClass,
parser.TypeRegNamespace,
parser.TypeRegProc,
Expand All @@ -253,8 +251,10 @@ func (r *RSG) GenerateRandomArg(typ parser.Type) string {
parser.TypeAny:
v = "NULL"
default:
// Check types that can't be compared using equality
switch typ.(type) {
case parser.TTuple:
case parser.TTuple,
parser.TArray:
v = "NULL"
default:
panic(fmt.Errorf("unknown arg type: %s (%T)", typ, typ))
Expand Down
8 changes: 6 additions & 2 deletions pkg/sql/alter_table.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,9 @@ func (n *alterTableNode) Start(params runParams) error {

n.tableDesc.AddColumnMutation(*col, sqlbase.DescriptorMutation_ADD)
if idx != nil {
n.tableDesc.AddIndexMutation(*idx, sqlbase.DescriptorMutation_ADD)
if err := n.tableDesc.AddIndexMutation(*idx, sqlbase.DescriptorMutation_ADD); err != nil {
return err
}
}
if d.HasColumnFamily() {
err := n.tableDesc.AddColumnToFamilyMaybeCreate(
Expand Down Expand Up @@ -136,7 +138,9 @@ func (n *alterTableNode) Start(params runParams) error {
return fmt.Errorf("index %q being dropped, try again later", d.Name)
}
}
n.tableDesc.AddIndexMutation(idx, sqlbase.DescriptorMutation_ADD)
if err := n.tableDesc.AddIndexMutation(idx, sqlbase.DescriptorMutation_ADD); err != nil {
return err
}

case *parser.CheckConstraintTableDef:
ck, err := makeCheckConstraint(*n.tableDesc, d, inuseNames, params.p.session.SearchPath)
Expand Down
14 changes: 8 additions & 6 deletions pkg/sql/create.go
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,9 @@ func (n *createIndexNode) Start(params runParams) error {
}

mutationIdx := len(n.tableDesc.Mutations)
n.tableDesc.AddIndexMutation(indexDesc, sqlbase.DescriptorMutation_ADD)
if err := n.tableDesc.AddIndexMutation(indexDesc, sqlbase.DescriptorMutation_ADD); err != nil {
return err
}
if err := n.tableDesc.AllocateIDs(); err != nil {
return err
}
Expand Down Expand Up @@ -1222,18 +1224,18 @@ func MakeTableDesc(
for _, def := range n.Defs {
if d, ok := def.(*parser.ColumnTableDef); ok {
if !desc.IsVirtualTable() {
if _, ok := d.Type.(*parser.ArrayColType); ok {
return desc, pgerror.UnimplementedWithIssueErrorf(2115, "ARRAY column types are unsupported")
}
if _, ok := d.Type.(*parser.VectorColType); ok {
return desc, pgerror.UnimplementedWithIssueErrorf(2115, "VECTOR column types are unsupported")
return desc, pgerror.NewErrorf(
pgerror.CodeFeatureNotSupportedError,
"VECTOR column types are unsupported",
)
}
}

col, idx, err := sqlbase.MakeColumnDefDescs(d, searchPath, evalCtx)
if err != nil {
return desc, err
}

desc.AddColumn(*col)
if idx != nil {
if err := desc.AddIndex(*idx, d.PrimaryKey); err != nil {
Expand Down
3 changes: 1 addition & 2 deletions pkg/sql/distsql_physical_planner.go
Original file line number Diff line number Diff line change
Expand Up @@ -225,8 +225,7 @@ func (dsp *distSQLPlanner) checkSupportForNode(node planNode) (distRecommendatio
case *renderNode:
for i, e := range n.render {
if typ := n.columns[i].Typ; typ.FamilyEqual(parser.TypeTuple) ||
typ.FamilyEqual(parser.TypeStringArray) ||
typ.FamilyEqual(parser.TypeIntArray) {
typ.FamilyEqual(parser.TypeArray) {
return 0, newQueryNotSupportedErrorf("unsupported render type %s", typ)
}
if err := dsp.checkExpr(e); err != nil {
Expand Down
4 changes: 3 additions & 1 deletion pkg/sql/drop.go
Original file line number Diff line number Diff line change
Expand Up @@ -352,7 +352,9 @@ func (p *planner) dropIndexByName(
found := false
for i := range tableDesc.Indexes {
if tableDesc.Indexes[i].ID == idx.ID {
tableDesc.AddIndexMutation(tableDesc.Indexes[i], sqlbase.DescriptorMutation_DROP)
if err := tableDesc.AddIndexMutation(tableDesc.Indexes[i], sqlbase.DescriptorMutation_DROP); err != nil {
return err
}
tableDesc.Indexes = append(tableDesc.Indexes[:i], tableDesc.Indexes[i+1:]...)
found = true
break
Expand Down
6 changes: 4 additions & 2 deletions pkg/sql/executor.go
Original file line number Diff line number Diff line change
Expand Up @@ -1966,9 +1966,7 @@ func checkResultType(typ parser.Type) error {
case parser.TypeTimestampTZ:
case parser.TypeInterval:
case parser.TypeUUID:
case parser.TypeStringArray:
case parser.TypeNameArray:
case parser.TypeIntArray:
case parser.TypeOid:
case parser.TypeRegClass:
case parser.TypeRegNamespace:
Expand All @@ -1979,6 +1977,10 @@ func checkResultType(typ parser.Type) error {
// Compare all types that cannot rely on == equality.
istype := typ.FamilyEqual
switch {
case istype(parser.TypeArray):
if istype(parser.UnwrapType(typ).(parser.TArray).Typ) {
return errors.Errorf("arrays cannot have arrays as element type")
}
case istype(parser.TypeCollatedString):
case istype(parser.TypeTuple):
case istype(parser.TypePlaceholder):
Expand Down
Loading

0 comments on commit 40b86c7

Please sign in to comment.