
Merge #39059 #39143
39059: cli: Add locality information to cockroach node status command. r=rohany a=rohany

Addressing #38438.

Release note (cli change): Add locality information to the `cockroach
node status` command.

39143: exec: do not plan a redundant projection r=yuzefovich a=yuzefovich

In the degenerate case when the input operator already produces
batches that match the desired projection, we don't need to plan a
projection operator.

Release note: None

Co-authored-by: Rohan Yadav <rohany@alumni.cmu.edu>
Co-authored-by: Yahor Yuzefovich <yahor@cockroachlabs.com>
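
For illustration, here is a minimal self-contained Go sketch of the check this change introduces. It is simplified from the NewSimpleProjectOp diff below; the standalone function name and the example values are ours, not part of the commit.

package main

import "fmt"

// isIdentityProjection reports whether a projection simply selects every
// input column in its original order. In that case, per the diff below,
// NewSimpleProjectOp returns its input operator instead of planning a
// projection.
func isIdentityProjection(numInputCols int, projection []uint32) bool {
	if numInputCols != len(projection) {
		return false
	}
	for i := range projection {
		if projection[i] != uint32(i) {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(isIdentityProjection(2, []uint32{0, 1})) // true: projection is redundant
	fmt.Println(isIdentityProjection(3, []uint32{0, 1})) // false: drops a column
	fmt.Println(isIdentityProjection(2, []uint32{1, 0})) // false: reorders columns
}
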
3 people committed Jul 29, 2019
3 parents 91a4688 + 888813c + 4356726 commit 45a1f49
Showing 5 changed files with 53 additions and 25 deletions.
8 changes: 5 additions & 3 deletions pkg/cli/node.go
@@ -77,6 +77,7 @@ var baseNodeColumnHeaders = []string{
"build",
"started_at",
"updated_at",
"locality",
"is_available",
"is_live",
}
@@ -149,12 +150,13 @@ func runStatusNodeInner(showDecommissioned bool, args []string) ([]string, [][]s
address,
build_tag AS build,
started_at,
updated_at,
updated_at,
locality,
CASE WHEN split_part(expiration,',',1)::decimal > now()::decimal
THEN true
ELSE false
END AS is_available,
ifnull(is_live, false)
ifnull(is_live, false)
FROM crdb_internal.gossip_liveness LEFT JOIN crdb_internal.gossip_nodes USING (node_id)`,
)

@@ -250,7 +252,7 @@ func getStatusNodeHeaders() []string {
}

func getStatusNodeAlignment() string {
align := "rllll"
align := "rlllll"
if nodeCtx.statusShowAll || nodeCtx.statusShowRanges {
align += "rrrrrr"
}
24 changes: 12 additions & 12 deletions pkg/cmd/roachtest/decommission.go
@@ -331,7 +331,7 @@ func runDecommissionAcceptance(ctx context.Context, t *test, c *cluster) {
"Please verify cluster health before removing the nodes.",
}
statusHeader := []string{
"id", "address", "build", "started_at", "updated_at", "is_available", "is_live",
"id", "address", "build", "started_at", "updated_at", "locality", "is_available", "is_live",
}
waitLiveDeprecated := "--wait=live is deprecated and is treated as --wait=all"

@@ -385,10 +385,10 @@ func runDecommissionAcceptance(ctx context.Context, t *test, c *cluster) {
}
exp := [][]string{
statusHeader,
{`1`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`2`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`3`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`4`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`1`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`2`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`3`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`4`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
}
if err := matchCSV(o, exp); err != nil {
t.Fatal(err)
@@ -579,9 +579,9 @@ func runDecommissionAcceptance(ctx context.Context, t *test, c *cluster) {

exp := [][]string{
statusHeader,
{`2`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`3`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`4`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`2`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`3`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`4`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
}
if err := matchCSV(o, exp); err != nil {
time.Sleep(time.Second)
@@ -608,10 +608,10 @@ func runDecommissionAcceptance(ctx context.Context, t *test, c *cluster) {

exp := [][]string{
statusHeader,
{`2`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`3`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`4`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`5`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`2`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`3`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`4`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
{`5`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`, `.*`},
}
return matchCSV(o, exp)
}); err != nil {
8 changes: 4 additions & 4 deletions pkg/sql/distsqlrun/column_exec_setup.go
@@ -543,7 +543,7 @@ func newColOperator(
projection = append(projection, i)
}
projection = append(projection, wf.OutputColIdx+1)
result.op = exec.NewSimpleProjectOp(result.op, projection)
result.op = exec.NewSimpleProjectOp(result.op, int(wf.OutputColIdx+1), projection)
}

columnTypes = append(spec.Input[0].ColumnTypes, *semtypes.Int)
@@ -590,11 +590,11 @@ func newColOperator(
for i := range columnTypes {
outputColumns = append(outputColumns, uint32(i))
}
result.op = exec.NewSimpleProjectOp(result.op, outputColumns)
result.op = exec.NewSimpleProjectOp(result.op, len(filterColumnTypes), outputColumns)
}
}
if post.Projection {
result.op = exec.NewSimpleProjectOp(result.op, post.OutputColumns)
result.op = exec.NewSimpleProjectOp(result.op, len(columnTypes), post.OutputColumns)
// Update output columnTypes.
newTypes := make([]semtypes.T, 0, len(post.OutputColumns))
for _, j := range post.OutputColumns {
@@ -625,12 +625,12 @@ func newColOperator(
result.memUsage += renderMem
renderedCols = append(renderedCols, uint32(outputIdx))
}
result.op = exec.NewSimpleProjectOp(result.op, len(columnTypes), renderedCols)
newTypes := make([]semtypes.T, 0, len(renderedCols))
for _, j := range renderedCols {
newTypes = append(newTypes, columnTypes[j])
}
columnTypes = newTypes
result.op = exec.NewSimpleProjectOp(result.op, renderedCols)
}
if post.Offset != 0 {
result.op = exec.NewOffsetOp(result.op, post.Offset)
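
One subtlety in the render-expression hunk above: the NewSimpleProjectOp call moves ahead of the columnTypes reassignment so that len(columnTypes) still describes the batch entering the projection rather than its output. Below is a minimal sketch of that ordering, with illustrative names (planSimpleProject is a stand-in, not the exec API).

package main

import "fmt"

// planSimpleProject stands in for exec.NewSimpleProjectOp: the only point here
// is that its second argument must describe the batch entering the projection,
// not the batch it produces.
func planSimpleProject(numInputCols int, projection []uint32) {
	fmt.Printf("keeping %d of %d input columns\n", len(projection), numInputCols)
}

func main() {
	columnTypes := []string{"int", "string", "decimal"} // types of the input batch
	renderedCols := []uint32{0, 2}                      // columns to keep

	// Plan the projection while columnTypes still describes the input ...
	planSimpleProject(len(columnTypes), renderedCols)

	// ... and only then narrow columnTypes to the projected output, mirroring
	// the render-expression branch above.
	newTypes := make([]string, 0, len(renderedCols))
	for _, j := range renderedCols {
		newTypes = append(newTypes, columnTypes[j])
	}
	columnTypes = newTypes
	fmt.Println("output column types:", columnTypes)
}
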
19 changes: 16 additions & 3 deletions pkg/sql/exec/simple_project.go
@@ -60,9 +60,22 @@ func (b *projectingBatch) AppendCol(t types.T) {
}

// NewSimpleProjectOp returns a new simpleProjectOp that applies a simple
// projection on the columns in its input batch, returning a new batch with only
// the columns in the projection slice, in order.
func NewSimpleProjectOp(input Operator, projection []uint32) Operator {
// projection on the columns in its input batch, returning a new batch with
// only the columns in the projection slice, in order. In a degenerate case
// when input already outputs batches that satisfy the projection, a
// simpleProjectOp is not planned and input is returned.
func NewSimpleProjectOp(input Operator, numInputCols int, projection []uint32) Operator {
if numInputCols == len(projection) {
projectionIsRedundant := true
for i := range projection {
if projection[i] != uint32(i) {
projectionIsRedundant = false
}
}
if projectionIsRedundant {
return input
}
}
return &simpleProjectOp{
input: input,
batch: newProjectionBatch(projection),
19 changes: 16 additions & 3 deletions pkg/sql/exec/simple_project_test.go
@@ -10,7 +10,13 @@

package exec

import "testing"
import (
"testing"

"github.com/cockroachdb/cockroach/pkg/sql/exec/coldata"
"github.com/cockroachdb/cockroach/pkg/sql/exec/types"
"github.com/stretchr/testify/require"
)

func TestSimpleProjectOp(t *testing.T) {
tcs := []struct {
@@ -54,13 +60,20 @@ func TestSimpleProjectOp(t *testing.T) {
}
for _, tc := range tcs {
runTests(t, []tuples{tc.tuples}, tc.expected, orderedVerifier, []int{0, 1}, func(input []Operator) (Operator, error) {
return NewSimpleProjectOp(input[0], tc.colsToKeep), nil
return NewSimpleProjectOp(input[0], len(tc.tuples[0]), tc.colsToKeep), nil
})
}

// Empty projection.
runTests(t, []tuples{{{1, 2, 3}, {1, 2, 3}}}, tuples{{}, {}}, orderedVerifier, []int{},
func(input []Operator) (Operator, error) {
return NewSimpleProjectOp(input[0], nil), nil
return NewSimpleProjectOp(input[0], 3 /* numInputCols */, nil), nil
})

t.Run("RedundantProjectionIsNotPlanned", func(t *testing.T) {
typs := []types.T{types.Int64, types.Int64}
input := newFiniteBatchSource(coldata.NewMemBatch(typs), 1 /* usableCount */)
projectOp := NewSimpleProjectOp(input, len(typs), []uint32{0, 1})
require.IsType(t, input, projectOp)
})
}
