Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

sql: Bugfixes and increased tests for IndexSkipTableReader #39833

Merged
merged 1 commit into from
Aug 23, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
41 changes: 11 additions & 30 deletions pkg/sql/distsqlrun/index_skip_table_reader.go
Original file line number Diff line number Diff line change
Expand Up @@ -181,23 +181,13 @@ func (t *indexSkipTableReader) Next() (sqlbase.EncDatumRow, *distsqlpb.ProducerM
}
}

var key []byte
{
// Fetch the PartialKey in a separate scope so that it cannot be reused
// outside this scope.
unsafeKey, err := t.fetcher.PartialKey(t.keyPrefixLen)
if err != nil {
t.MoveToDraining(err)
return nil, &distsqlpb.ProducerMetadata{Err: err}
}
// Modifying unsafeKey in place would corrupt our current row, so make a
// copy.
// A key needs to be allocated every time because it is illegal to mutate
// batch requests which would end up happening if we used scratch space.
// TODO(asubiotto): Another option would be to construct a key after
// returning a row (at the top of this loop).
key = make([]byte, len(unsafeKey))
copy(key, unsafeKey)
		// This key *must not* be modified: it aliases memory internal to the
		// fetcher, so any mutation would corrupt the fetcher's current row and
		// cause it to behave incorrectly.
key, err := t.fetcher.PartialKey(t.keyPrefixLen)
if err != nil {
t.MoveToDraining(err)
return nil, &distsqlpb.ProducerMetadata{Err: err}
}

row, _, _, err := t.fetcher.NextRow(t.Ctx)
Expand All @@ -212,19 +202,10 @@ func (t *indexSkipTableReader) Next() (sqlbase.EncDatumRow, *distsqlpb.ProducerM
}

if !t.reverse {
// 0xff is the largest prefix marker for any encoded key. To ensure that
// our new key is larger than any value with the same prefix, we place
// 0xff at all other index column values, and one more to guard against
// 0xff present as a value in the table (0xff encodes a type of null).
// TODO(asubiotto): We should delegate to PrefixEnd here (it exists for
// this purpose and is widely used), but we still need to handle maximal
// keys. Maybe we should just exit early (t.currentSpan++) when we
// encounter one. We also need a test to ensure that we behave properly
// when we encounter maximal (i.e. all nulls) prefix keys.
for i := 0; i < (t.indexLen - t.keyPrefixLen + 1); i++ {
key = append(key, 0xff)
}
t.spans[t.currentSpan].Key = key
		// Advance the span's start key to PrefixEnd, the smallest key greater
		// than every key sharing the current prefix. This skips all remaining
		// rows with this prefix and resumes the scan at the next distinct value.
t.spans[t.currentSpan].Key = key.PrefixEnd()
} else {
// In the case of reverse, this is much easier. The reverse fetcher
// returns the key retrieved, in this case the first key smaller
Expand Down
45 changes: 45 additions & 0 deletions pkg/sql/distsqlrun/index_skip_table_reader_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -141,12 +141,29 @@ func TestIndexSkipTableReader(t *testing.T) {
99,
sqlutils.ToRowFn(xFnt6, yFnt6))

	// create a table t7 where each row is:
	//
	// | x        | y    | z    |
	// |----------|------|------|
	// | rowId%10 | NULL | NULL |
xFnt7 := func(row int) tree.Datum {
return tree.NewDInt(tree.DInt(row % 10))
}
nullt7 := func(_ int) tree.Datum {
return tree.DNull
}
sqlutils.CreateTable(t, sqlDB, "t7",
"x INT, y INT, z INT, PRIMARY KEY (x), INDEX i1 (x, y DESC, z DESC), INDEX i2 (y DESC, z DESC)",
10,
sqlutils.ToRowFn(xFnt7, nullt7, nullt7))

td1 := sqlbase.GetTableDescriptor(kvDB, "test", "t1")
td2 := sqlbase.GetTableDescriptor(kvDB, "test", "t2")
td3 := sqlbase.GetTableDescriptor(kvDB, "test", "t3")
td4 := sqlbase.GetTableDescriptor(kvDB, "test", "t4")
td5 := sqlbase.GetTableDescriptor(kvDB, "test", "t5")
td6 := sqlbase.GetTableDescriptor(kvDB, "test", "t6")
td7 := sqlbase.GetTableDescriptor(kvDB, "test", "t7")

makeIndexSpan := func(td *sqlbase.TableDescriptor, start, end int) distsqlpb.TableReaderSpan {
var span roachpb.Span
Expand Down Expand Up @@ -369,6 +386,34 @@ func TestIndexSkipTableReader(t *testing.T) {
},
expected: "[[10] [9] [8] [7] [6] [5] [4] [3] [2] [1]]",
},
{
			// Distinct scan on an index whose non-leading columns are all NULL
desc: "IndexMultipleNulls",
tableDesc: td7,
spec: distsqlpb.IndexSkipTableReaderSpec{
Spans: []distsqlpb.TableReaderSpan{{Span: td7.IndexSpan(2)}},
IndexIdx: 1,
},
post: distsqlpb.PostProcessSpec{
Projection: true,
OutputColumns: []uint32{0},
},
expected: "[[0] [1] [2] [3] [4] [5] [6] [7] [8] [9]]",
},
{
// Distinct scan on index with only null values
desc: "IndexAllNulls",
tableDesc: td7,
spec: distsqlpb.IndexSkipTableReaderSpec{
Spans: []distsqlpb.TableReaderSpan{{Span: td7.IndexSpan(3)}},
IndexIdx: 2,
},
post: distsqlpb.PostProcessSpec{
Projection: true,
OutputColumns: []uint32{1},
},
expected: "[[NULL]]",
},
}

for _, c := range testCases {
Expand Down