diff --git a/DEPS.bzl b/DEPS.bzl index 3760e715deed..8667ebb14217 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -1263,10 +1263,10 @@ def go_deps(): patches = [ "@cockroach//build/patches:com_github_cockroachdb_pebble.patch", ], - sha256 = "0018bcef357bf7bba06d5e3eb35277709b5fd98ee437924001531fa935d8c76d", - strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20220126162719-a5c1766b568a", + sha256 = "e411c1b5f5c7d2ef9dc337615de7b51051a182bba9c298f540d74d95d8a8f279", + strip_prefix = "github.com/cockroachdb/pebble@v0.0.0-20220201221612-38b68e17aa97", urls = [ - "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20220126162719-a5c1766b568a.zip", + "https://storage.googleapis.com/cockroach-godeps/gomod/github.com/cockroachdb/pebble/com_github_cockroachdb_pebble-v0.0.0-20220201221612-38b68e17aa97.zip", ], ) go_repository( diff --git a/Makefile b/Makefile index 8f7731e706da..16f1448c5d3e 100644 --- a/Makefile +++ b/Makefile @@ -979,7 +979,7 @@ $(go-targets): override LINKFLAGS += \ -X "github.com/cockroachdb/cockroach/pkg/build.rev=$(shell cat .buildinfo/rev)" \ -X "github.com/cockroachdb/cockroach/pkg/build.cgoTargetTriple=$(TARGET_TRIPLE)" \ $(if $(BUILDCHANNEL),-X "github.com/cockroachdb/cockroach/pkg/build.channel=$(BUILDCHANNEL)") \ - $(if $(BUILD_TAGGED_RELEASE),-X "github.com/cockroachdb/cockroach/pkg/util/log.crashReportEnv=$(if $(BUILDINFO_TAG),$(BUILDINFO_TAG),$(shell cat .buildinfo/tag))") + $(if $(BUILD_TAGGED_RELEASE),-X "github.com/cockroachdb/cockroach/pkg/util/log/logcrash.crashReportEnv=$(if $(BUILDINFO_TAG),$(BUILDINFO_TAG),$(shell cat .buildinfo/tag))") # The build.utcTime format must remain in sync with TimeFormat in # pkg/build/info.go. It is not installed in tests or in `buildshort` to avoid diff --git a/build/bazelutil/stamp.sh b/build/bazelutil/stamp.sh index 9d663d257f19..55bbd4b492b1 100755 --- a/build/bazelutil/stamp.sh +++ b/build/bazelutil/stamp.sh @@ -28,7 +28,7 @@ fi # TODO(ricky): Also provide a way to stamp the following variables: # - github.com/cockroachdb/cockroach/pkg/build.channel -# - github.com/cockroachdb/cockroach/pkg/util/log.crashReportEnv +# - github.com/cockroachdb/cockroach/pkg/util/log/logcrash.crashReportEnv # Variables beginning with "STABLE" will be written to stable-status.txt, and # others will be written to volatile-status.txt. 
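Context for the LINKFLAGS change above, as a minimal sketch rather than the real build: the Go linker's -X flag can only stamp a package-level string variable by its fully qualified import path, which is why moving crashReportEnv from pkg/util/log to pkg/util/log/logcrash requires the Makefile (and eventually stamp.sh) to spell out the new path. The variable name, default value, and tag below are stand-ins, not the actual logcrash package.

// Illustrative only; not part of the patch. Shows how a -ldflags -X flag
// overrides a package-level string at link time, mirroring what LINKFLAGS
// does for github.com/cockroachdb/cockroach/pkg/util/log/logcrash.crashReportEnv.
package main

import "fmt"

// crashReportEnv is a stand-in for logcrash.crashReportEnv; its compiled-in
// default is replaced when a tagged release build stamps it at link time.
var crashReportEnv = "development"

func main() {
	// go run .                                            -> development
	// go build -ldflags '-X main.crashReportEnv=v22.1.0'  -> v22.1.0 (hypothetical tag)
	fmt.Println("crash report environment:", crashReportEnv)
}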
diff --git a/c-deps/krb5 b/c-deps/krb5 index 9d3270758433..a1b8c3b2a397 160000 --- a/c-deps/krb5 +++ b/c-deps/krb5 @@ -1 +1 @@ -Subproject commit 9d32707584334fba347b21c06c905728a929c0a1 +Subproject commit a1b8c3b2a397cd8132328e95613319d490684969 diff --git a/docs/generated/sql/bnf/reset_session_stmt.bnf b/docs/generated/sql/bnf/reset_session_stmt.bnf index b2cbe4a2e468..5695f63dc938 100644 --- a/docs/generated/sql/bnf/reset_session_stmt.bnf +++ b/docs/generated/sql/bnf/reset_session_stmt.bnf @@ -1,3 +1,4 @@ reset_session_stmt ::= 'RESET' session_var | 'RESET' 'SESSION' session_var + | 'RESET_ALL' 'ALL' diff --git a/docs/generated/sql/bnf/stmt_block.bnf b/docs/generated/sql/bnf/stmt_block.bnf index d5df68b66665..62adf4a68a13 100644 --- a/docs/generated/sql/bnf/stmt_block.bnf +++ b/docs/generated/sql/bnf/stmt_block.bnf @@ -579,6 +579,7 @@ pause_all_jobs_stmt ::= reset_session_stmt ::= 'RESET' session_var | 'RESET' 'SESSION' session_var + | 'RESET_ALL' 'ALL' reset_csetting_stmt ::= 'RESET' 'CLUSTER' 'SETTING' var_name diff --git a/go.mod b/go.mod index 144730b939c3..904b6fceff6c 100644 --- a/go.mod +++ b/go.mod @@ -43,7 +43,7 @@ require ( github.com/cockroachdb/go-test-teamcity v0.0.0-20191211140407-cff980ad0a55 github.com/cockroachdb/gostdlib v1.13.0 github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f - github.com/cockroachdb/pebble v0.0.0-20220126162719-a5c1766b568a + github.com/cockroachdb/pebble v0.0.0-20220201221612-38b68e17aa97 github.com/cockroachdb/redact v1.1.3 github.com/cockroachdb/returncheck v0.0.0-20200612231554-92cdbca611dd github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 diff --git a/go.sum b/go.sum index 050463e12415..460fc5b70cb4 100644 --- a/go.sum +++ b/go.sum @@ -427,8 +427,8 @@ github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f h1:6jduT9Hfc0n github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/panicparse/v2 v2.0.0-20211103220158-604c82a44f1e h1:FrERdkPlRj+v7fc+PGpey3GUiDGuTR5CsmLCA54YJ8I= github.com/cockroachdb/panicparse/v2 v2.0.0-20211103220158-604c82a44f1e/go.mod h1:pMxsKyCewnV3xPaFvvT9NfwvDTcIx2Xqg0qL5Gq0SjM= -github.com/cockroachdb/pebble v0.0.0-20220126162719-a5c1766b568a h1:JY8MIjk2GyMHjCqmHkNBekLi2N/kNS3uAKheGe78huM= -github.com/cockroachdb/pebble v0.0.0-20220126162719-a5c1766b568a/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU= +github.com/cockroachdb/pebble v0.0.0-20220201221612-38b68e17aa97 h1:zHSurQDtRibMUCQnJhUeV96D5tO8Vq9L39L/xr4BayI= +github.com/cockroachdb/pebble v0.0.0-20220201221612-38b68e17aa97/go.mod h1:buxOO9GBtOcq1DiXDpIPYrmxY020K2A8lOrwno5FetU= github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/redact v1.1.1/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= diff --git a/pkg/base/test_server_args.go b/pkg/base/test_server_args.go index 8cebd7438b99..3e5e85f539a6 100644 --- a/pkg/base/test_server_args.go +++ b/pkg/base/test_server_args.go @@ -309,4 +309,7 @@ type TestTenantArgs struct { // TracingDefault controls whether the tracing will be on or off by default. TracingDefault tracing.TracingMode + + // RPCHeartbeatInterval controls how often the tenant sends Ping requests. 
+ RPCHeartbeatInterval time.Duration } diff --git a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_query_behavior b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_query_behavior index 8d7d34288e0d..318f7ab5d710 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/multi_region_query_behavior +++ b/pkg/ccl/logictestccl/testdata/logic_test/multi_region_query_behavior @@ -1,5 +1,5 @@ # LogicTest: multiregion-9node-3region-3azs -# TODO(#69265): enable multiregion-9node-3region-3azs-tenant. +# TODO(#75864): enable multiregion-9node-3region-3azs-tenant. # Set the closed timestamp interval to be short to shorten the amount of time # we need to wait for the system config to propagate. diff --git a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior index aece04377d3a..cbd0b9158b0d 100644 --- a/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior +++ b/pkg/ccl/logictestccl/testdata/logic_test/regional_by_row_query_behavior @@ -1,5 +1,5 @@ # LogicTest: multiregion-9node-3region-3azs -# TODO(#69265): enable multiregion-9node-3region-3azs-tenant and/or revert +# TODO(#75864): enable multiregion-9node-3region-3azs-tenant and/or revert # the commit that split these changes out. # Set the closed timestamp interval to be short to shorten the amount of time diff --git a/pkg/ccl/serverccl/BUILD.bazel b/pkg/ccl/serverccl/BUILD.bazel index 9e31c616d29b..ac903f23c668 100644 --- a/pkg/ccl/serverccl/BUILD.bazel +++ b/pkg/ccl/serverccl/BUILD.bazel @@ -9,12 +9,13 @@ go_library( go_test( name = "serverccl_test", - size = "medium", + size = "large", srcs = [ "admin_test.go", "main_test.go", "role_authentication_test.go", "server_sql_test.go", + "tenant_decommissioned_host_test.go", "tenant_vars_test.go", ], embed = [":serverccl"], @@ -24,6 +25,7 @@ go_test( "//pkg/ccl/kvccl", "//pkg/ccl/utilccl", "//pkg/ccl/utilccl/licenseccl", + "//pkg/kv/kvserver/liveness/livenesspb", "//pkg/roachpb:with-mocks", "//pkg/security", "//pkg/security/securitytest", @@ -34,6 +36,7 @@ go_test( "//pkg/sql/distsql", "//pkg/sql/tests", "//pkg/testutils/serverutils", + "//pkg/testutils/skip", "//pkg/testutils/sqlutils", "//pkg/testutils/testcluster", "//pkg/util", diff --git a/pkg/ccl/serverccl/tenant_decommissioned_host_test.go b/pkg/ccl/serverccl/tenant_decommissioned_host_test.go new file mode 100644 index 000000000000..7ab2dbf32dbf --- /dev/null +++ b/pkg/ccl/serverccl/tenant_decommissioned_host_test.go @@ -0,0 +1,77 @@ +// Copyright 2022 The Cockroach Authors. +// +// Licensed as a CockroachDB Enterprise file under the Cockroach Community +// License (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// https://github.com/cockroachdb/cockroach/blob/master/licenses/CCL.txt + +package serverccl + +import ( + "context" + gosql "database/sql" + "testing" + "time" + + "github.com/cockroachdb/cockroach/pkg/base" + "github.com/cockroachdb/cockroach/pkg/kv/kvserver/liveness/livenesspb" + "github.com/cockroachdb/cockroach/pkg/roachpb" + "github.com/cockroachdb/cockroach/pkg/testutils/serverutils" + "github.com/cockroachdb/cockroach/pkg/testutils/skip" + "github.com/cockroachdb/cockroach/pkg/util/leaktest" + "github.com/cockroachdb/cockroach/pkg/util/log" + "github.com/stretchr/testify/require" +) + +func TestTenantWithDecommissionedID(t *testing.T) { + defer leaktest.AfterTest(t)() + defer log.Scope(t).Close(t) + + // This is a regression test for a multi-tenant bug. Each tenant sql server + // is assigned an InstanceID. The InstanceID corresponds to the id column in + // the system.sql_instances table. The sql process sets rpcContext.NodeID = + // InstanceID and PingRequest.NodeID = rpcContext.NodeID. + // + // When a KV node receives a ping, it checks the NodeID against a + // decommissioned node tombstone list. Until PR #75766, this caused the KV + // node to reject pings from sql servers. The rejected pings would manifest + // as sql connection timeouts. + + skip.UnderStress(t, "decommissioning times out under stress") + + ctx := context.Background() + tc := serverutils.StartNewTestCluster(t, 4, base.TestClusterArgs{}) + defer tc.Stopper().Stop(ctx) + + server := tc.Server(0) + decommissionID := tc.Server(3).NodeID() + require.NoError(t, server.Decommission(ctx, livenesspb.MembershipStatus_DECOMMISSIONING, []roachpb.NodeID{decommissionID})) + require.NoError(t, server.Decommission(ctx, livenesspb.MembershipStatus_DECOMMISSIONED, []roachpb.NodeID{decommissionID})) + + tenantID := serverutils.TestTenantID() + + var tenantSQLServer serverutils.TestTenantInterface + var tenantDB *gosql.DB + for instanceID := 1; instanceID <= int(decommissionID); instanceID++ { + sqlServer, tenant := serverutils.StartTenant(t, server, base.TestTenantArgs{ + TenantID: tenantID, + Existing: instanceID != 1, + // Set a low heartbeat interval. The first heartbeat succeeds + // because the tenant needs to communicate with the kv node to + // determine its instance id. 
+ RPCHeartbeatInterval: time.Millisecond * 5, + }) + if sqlServer.RPCContext().NodeID.Get() == decommissionID { + tenantSQLServer = sqlServer + tenantDB = tenant + } else { + tenant.Close() + } + } + require.NotNil(t, tenantSQLServer) + defer tenantDB.Close() + + _, err := tenantDB.Exec("CREATE ROLE test_user WITH PASSWORD 'password'") + require.NoError(t, err) +} diff --git a/pkg/cmd/dev/doctor.go b/pkg/cmd/dev/doctor.go index ab3101e48006..87272f8c8e0d 100644 --- a/pkg/cmd/dev/doctor.go +++ b/pkg/cmd/dev/doctor.go @@ -13,6 +13,7 @@ package main import ( "errors" "log" + "os" "os/exec" "path/filepath" "runtime" @@ -99,6 +100,24 @@ Please perform the following steps: } } + const binDir = "bin" + const submodulesMarkerPath = binDir + "/.submodules-initialized" + d.log.Println("doctor: running submodules check") + if _, err := os.Stat(submodulesMarkerPath); errors.Is(err, os.ErrNotExist) { + if _, err = d.exec.CommandContextSilent(ctx, "git", "rev-parse", "--is-inside-work-tree"); err != nil { + return err + } + if _, err = d.exec.CommandContextSilent(ctx, "git", "submodule", "update", "--init", "--recursive"); err != nil { + return err + } + if err = d.os.MkdirAll(binDir); err != nil { + return err + } + if err = d.os.WriteFile(submodulesMarkerPath, ""); err != nil { + return err + } + } + // Check whether the build is properly configured to use stamping. passedStampTest := true if _, err := d.exec.CommandContextSilent(ctx, "bazel", "build", "//build/bazelutil:test_stamping"); err != nil { diff --git a/pkg/rpc/context.go b/pkg/rpc/context.go index 19c2a15548eb..2c449fc5a0d6 100644 --- a/pkg/rpc/context.go +++ b/pkg/rpc/context.go @@ -407,10 +407,10 @@ type ContextOptions struct { // preliminary checks but before recording clock offset information. // // It can inject an error. - OnIncomingPing func(*PingRequest) error + OnIncomingPing func(context.Context, *PingRequest) error // OnOutgoingPing intercepts outgoing PingRequests. It may inject an // error. - OnOutgoingPing func(*PingRequest) error + OnOutgoingPing func(context.Context, *PingRequest) error Knobs ContextTestingKnobs // NodeID is the node ID / SQL instance ID container shared @@ -1419,7 +1419,7 @@ func (rpcCtx *Context) runHeartbeat( ServerVersion: rpcCtx.Settings.Version.BinaryVersion(), } - interceptor := func(*PingRequest) error { return nil } + interceptor := func(context.Context, *PingRequest) error { return nil } if fn := rpcCtx.OnOutgoingPing; fn != nil { interceptor = fn } @@ -1429,7 +1429,7 @@ func (rpcCtx *Context) runHeartbeat( ping := func(ctx context.Context) error { // NB: We want the request to fail-fast (the default), otherwise we won't // be notified of transport failures. 
- if err := interceptor(request); err != nil { + if err := interceptor(ctx, request); err != nil { returnErr = true return err } diff --git a/pkg/rpc/context_test.go b/pkg/rpc/context_test.go index 9807247e14d4..967c05e0247d 100644 --- a/pkg/rpc/context_test.go +++ b/pkg/rpc/context_test.go @@ -179,13 +179,13 @@ func TestPingInterceptors(t *testing.T) { Clock: hlc.NewClock(hlc.UnixNano, 500*time.Millisecond), Stopper: stop.NewStopper(), Settings: cluster.MakeTestingClusterSettings(), - OnOutgoingPing: func(req *PingRequest) error { + OnOutgoingPing: func(ctx context.Context, req *PingRequest) error { if req.TargetNodeID == blockedTargetNodeID { return errBoomSend } return nil }, - OnIncomingPing: func(req *PingRequest) error { + OnIncomingPing: func(ctx context.Context, req *PingRequest) error { if req.OriginNodeID == blockedOriginNodeID { return errBoomRecv } diff --git a/pkg/rpc/heartbeat.go b/pkg/rpc/heartbeat.go index 4ffd83211065..147f99075578 100644 --- a/pkg/rpc/heartbeat.go +++ b/pkg/rpc/heartbeat.go @@ -54,7 +54,7 @@ type HeartbeatService struct { clusterName string disableClusterNameVerification bool - onHandlePing func(*PingRequest) error // see ContextOptions.OnIncomingPing + onHandlePing func(context.Context, *PingRequest) error // see ContextOptions.OnIncomingPing // TestingAllowNamedRPCToAnonymousServer, when defined (in tests), // disables errors in case a heartbeat requests a specific node ID but @@ -169,7 +169,7 @@ func (hs *HeartbeatService) Ping(ctx context.Context, args *PingRequest) (*PingR } if fn := hs.onHandlePing; fn != nil { - if err := fn(args); err != nil { + if err := fn(ctx, args); err != nil { return nil, err } } diff --git a/pkg/server/server.go b/pkg/server/server.go index d30e263d516d..062b682322d6 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -211,13 +211,18 @@ func NewServer(cfg Config, stopper *stop.Stopper) (*Server, error) { Clock: clock, Stopper: stopper, Settings: cfg.Settings, - OnOutgoingPing: func(req *rpc.PingRequest) error { + OnOutgoingPing: func(ctx context.Context, req *rpc.PingRequest) error { // Outgoing ping will block requests with codes.FailedPrecondition to // notify caller that this replica is decommissioned but others could // still be tried as caller node is valid, but not the destination. return checkPingFor(ctx, req.TargetNodeID, codes.FailedPrecondition) }, - OnIncomingPing: func(req *rpc.PingRequest) error { + OnIncomingPing: func(ctx context.Context, req *rpc.PingRequest) error { + // Decommission state is only tracked for the system tenant. + if tenantID, isTenant := roachpb.TenantFromContext(ctx); isTenant && + !roachpb.IsSystemTenantID(tenantID.ToUint64()) { + return nil + } // Incoming ping will reject requests with codes.PermissionDenied to // signal remote node that it is not considered valid anymore and // operations should fail immediately. 
diff --git a/pkg/server/testserver.go b/pkg/server/testserver.go index 311c9c58b225..863ffe615fc0 100644 --- a/pkg/server/testserver.go +++ b/pkg/server/testserver.go @@ -716,6 +716,9 @@ func (ts *TestServer) StartTenant( tenantKnobs.ClusterSettingsUpdater = st.MakeUpdater() } } + if params.RPCHeartbeatInterval != 0 { + baseCfg.RPCHeartbeatInterval = params.RPCHeartbeatInterval + } sqlServer, addr, httpAddr, err := StartTenant( ctx, stopper, diff --git a/pkg/sql/backfill.go b/pkg/sql/backfill.go index 16e07cb3d4de..f0b5e45765d5 100644 --- a/pkg/sql/backfill.go +++ b/pkg/sql/backfill.go @@ -1056,7 +1056,7 @@ func (sc *SchemaChanger) distIndexBackfill( if updatedTodoSpans == nil { return nil } - nRanges, err := numRangesInSpans(ctx, sc.db, sc.distSQLPlanner, mu.updatedTodoSpans) + nRanges, err := numRangesInSpans(ctx, sc.db, sc.distSQLPlanner, updatedTodoSpans) if err != nil { return err } @@ -1077,9 +1077,16 @@ func (sc *SchemaChanger) distIndexBackfill( }) } + // updateJobMu ensures only one goroutine is calling + // updateJobDetails at a time to avoid a data race in + // SetResumeSpansInJob. This mutex should be uncontended when + // sc.testingKnobs.AlwaysUpdateIndexBackfillDetails is false. + var updateJobMu syncutil.Mutex updateJobDetails = func() error { updatedTodoSpans := getTodoSpansForUpdate() return sc.db.Txn(ctx, func(ctx context.Context, txn *kv.Txn) error { + updateJobMu.Lock() + defer updateJobMu.Unlock() // No processor has returned completed spans yet. if updatedTodoSpans == nil { return nil diff --git a/pkg/sql/distsql_physical_planner.go b/pkg/sql/distsql_physical_planner.go index 951e64eddaed..46ec1e01c1f7 100644 --- a/pkg/sql/distsql_physical_planner.go +++ b/pkg/sql/distsql_physical_planner.go @@ -2228,14 +2228,18 @@ func (dsp *DistSQLPlanner) createPlanForLookupJoin( } joinReaderSpec := execinfrapb.JoinReaderSpec{ - Table: *n.table.desc.TableDesc(), - Type: n.joinType, - LockingStrength: n.table.lockingStrength, - LockingWaitPolicy: n.table.lockingWaitPolicy, - MaintainOrdering: len(n.reqOrdering) > 0, - HasSystemColumns: n.table.containsSystemColumns, - LeftJoinWithPairedJoiner: n.isSecondJoinInPairedJoiner, - LookupBatchBytesLimit: dsp.distSQLSrv.TestingKnobs.JoinReaderBatchBytesLimit, + Table: *n.table.desc.TableDesc(), + Type: n.joinType, + LockingStrength: n.table.lockingStrength, + LockingWaitPolicy: n.table.lockingWaitPolicy, + // TODO(sumeer): specifying ordering here using isFirstJoinInPairedJoiner + // is late in the sense that the cost of this has not been taken into + // account. Make this decision earlier in CustomFuncs.GenerateLookupJoins. + MaintainOrdering: len(n.reqOrdering) > 0 || n.isFirstJoinInPairedJoiner, + HasSystemColumns: n.table.containsSystemColumns, + LeftJoinWithPairedJoiner: n.isSecondJoinInPairedJoiner, + OutputGroupContinuationForLeftRow: n.isFirstJoinInPairedJoiner, + LookupBatchBytesLimit: dsp.distSQLSrv.TestingKnobs.JoinReaderBatchBytesLimit, } joinReaderSpec.IndexIdx, err = getIndexIdx(n.table.index, n.table.desc) if err != nil { @@ -2251,7 +2255,7 @@ func (dsp *DistSQLPlanner) createPlanForLookupJoin( joinReaderSpec.LookupColumnsAreKey = n.eqColsAreKey numInputNodeCols, planToStreamColMap, post, types := - mappingHelperForLookupJoins(plan, n.input, n.table, false /* addContinuationCol */) + mappingHelperForLookupJoins(plan, n.input, n.table, n.isFirstJoinInPairedJoiner) // Set the lookup condition. 
var indexVarMap []int diff --git a/pkg/sql/distsql_spec_exec_factory.go b/pkg/sql/distsql_spec_exec_factory.go index 9e8ff87bebb2..b513083dd2e0 100644 --- a/pkg/sql/distsql_spec_exec_factory.go +++ b/pkg/sql/distsql_spec_exec_factory.go @@ -658,6 +658,7 @@ func (e *distSQLSpecExecFactory) ConstructLookupJoin( remoteLookupExpr tree.TypedExpr, lookupCols exec.TableColumnOrdinalSet, onCond tree.TypedExpr, + isFirstJoinInPairedJoiner bool, isSecondJoinInPairedJoiner bool, reqOrdering exec.OutputOrdering, locking *tree.LockingItem, diff --git a/pkg/sql/execinfrapb/flow_diagram.go b/pkg/sql/execinfrapb/flow_diagram.go index 7bbac178a601..ee477494eb8e 100644 --- a/pkg/sql/execinfrapb/flow_diagram.go +++ b/pkg/sql/execinfrapb/flow_diagram.go @@ -214,6 +214,9 @@ func (jr *JoinReaderSpec) summary() (string, []string) { if jr.LeftJoinWithPairedJoiner { details = append(details, "second join in paired-join") } + if jr.OutputGroupContinuationForLeftRow { + details = append(details, "first join in paired-join") + } return "JoinReader", details } diff --git a/pkg/sql/logictest/testdata/logic_test/lookup_join b/pkg/sql/logictest/testdata/logic_test/lookup_join index 6a620cc99b38..261bffedc27a 100644 --- a/pkg/sql/logictest/testdata/logic_test/lookup_join +++ b/pkg/sql/logictest/testdata/logic_test/lookup_join @@ -378,7 +378,7 @@ SELECT small.c, large.c FROM small LEFT JOIN large ON small.c = large.b AND larg 27 NULL 30 NULL -## Left join with ON filter on non-covering index +## Left join with ON filter on non-covering index. Will execute as paired-joins. query II rowsort SELECT small.c, large.d FROM small LEFT JOIN large ON small.c = large.b AND large.d < 30 ---- @@ -393,6 +393,26 @@ SELECT small.c, large.d FROM small LEFT JOIN large ON small.c = large.b AND larg 27 NULL 30 NULL +## Left semi join with ON filter on non-covering index. Will execute as paired-joins. +query I rowsort +SELECT small.c FROM small WHERE EXISTS(SELECT 1 FROM large WHERE small.c = large.b AND large.d < 30) +---- +6 +12 + +## Left anti join with ON filter on non-covering index. Will execute as paired-joins. +query I rowsort +SELECT small.c FROM small WHERE NOT EXISTS(SELECT 1 FROM large WHERE small.c = large.b AND large.d < 30) +---- +3 +9 +15 +18 +21 +24 +27 +30 + ########################################################### # LOOKUP JOINS ON IMPLICIT INDEX KEY COLUMNS # # https://github.com/cockroachdb/cockroach/issues/31777 # diff --git a/pkg/sql/logictest/testdata/logic_test/set b/pkg/sql/logictest/testdata/logic_test/set index 76a0c04c8ea8..27d5b26de2d9 100644 --- a/pkg/sql/logictest/testdata/logic_test/set +++ b/pkg/sql/logictest/testdata/logic_test/set @@ -743,6 +743,20 @@ SHOW tracing.custom ---- ijk +# Test that RESET ALL changes custom options to empty strings. +statement ok +RESET ALL + +query T +SHOW tracing.custom +---- +· + +query T +SHOW custom_option.set_sql +---- +· + statement error unrecognized configuration parameter "custom_option.does_not_yet_exist" SHOW custom_option.does_not_yet_exist diff --git a/pkg/sql/logictest/testdata/logic_test/set_role b/pkg/sql/logictest/testdata/logic_test/set_role index cb5e07c23d15..e1dd05385943 100644 --- a/pkg/sql/logictest/testdata/logic_test/set_role +++ b/pkg/sql/logictest/testdata/logic_test/set_role @@ -357,6 +357,15 @@ root root root root statement ok SET ROLE testuser +# Verify that RESET ALL does *not* affect role. 
+statement ok +RESET ALL + +query TTTT +SELECT current_user(), current_user, session_user(), session_user +---- +testuser testuser root root + query T SELECT user_name FROM crdb_internal.node_sessions WHERE active_queries LIKE 'SELECT user_name%' diff --git a/pkg/sql/logictest/testdata/logic_test/set_time_zone b/pkg/sql/logictest/testdata/logic_test/set_time_zone index a3c7ecb8f4ce..adbe9034dcd8 100644 --- a/pkg/sql/logictest/testdata/logic_test/set_time_zone +++ b/pkg/sql/logictest/testdata/logic_test/set_time_zone @@ -231,3 +231,18 @@ query T SELECT TIME '05:40:00'::TIMETZ ---- 0000-01-01 05:40:00 +0000 UTC + +statement error pq: invalid value for parameter "timezone": "'168'": cannot find time zone "168": UTC timezone offset is out of range. +SET TIME ZONE '168' + +statement error pq: invalid value for parameter "timezone": "'-168'": cannot find time zone "-168": UTC timezone offset is out of range. +SET TIME ZONE '-168' + +statement error pq: invalid value for parameter "timezone": "'-0500'": cannot find time zone "-0500": UTC timezone offset is out of range. +SET TIME ZONE '-0500' + +statement error pq: invalid value for parameter "timezone": "'0500'": cannot find time zone "0500": UTC timezone offset is out of range. +SET TIME ZONE '0500' + +statement error pq: invalid value for parameter "timezone": "'-0500'": cannot find time zone "-0500": UTC timezone offset is out of range. +SET TIME ZONE '-0500' diff --git a/pkg/sql/lookup_join.go b/pkg/sql/lookup_join.go index f9d03e907527..d91dc01af15a 100644 --- a/pkg/sql/lookup_join.go +++ b/pkg/sql/lookup_join.go @@ -56,13 +56,17 @@ type lookupJoinNode struct { remoteLookupExpr tree.TypedExpr // columns are the produced columns, namely the input columns and (unless the - // join type is semi or anti join) the columns in the table scanNode. + // join type is semi or anti join) the columns in the table scanNode. It + // includes an additional continuation column when IsFirstJoinInPairedJoin + // is true. columns colinfo.ResultColumns // onCond is any ON condition to be used in conjunction with the implicit // equality condition on eqCols or the conditions in lookupExpr. onCond tree.TypedExpr + // At most one of is{First,Second}JoinInPairedJoiner can be true. + isFirstJoinInPairedJoiner bool isSecondJoinInPairedJoiner bool reqOrdering ReqOrdering diff --git a/pkg/sql/opt/exec/execbuilder/relational.go b/pkg/sql/opt/exec/execbuilder/relational.go index 5e8b8ae83c4d..f12eb9934d98 100644 --- a/pkg/sql/opt/exec/execbuilder/relational.go +++ b/pkg/sql/opt/exec/execbuilder/relational.go @@ -1752,10 +1752,25 @@ func (b *Builder) buildLookupJoin(join *memo.LookupJoinExpr) (execPlan, error) { inputCols := join.Input.Relational().OutputCols lookupCols := join.Cols.Difference(inputCols) + if join.IsFirstJoinInPairedJoiner { + lookupCols.Remove(join.ContinuationCol) + } lookupOrdinals, lookupColMap := b.getColumns(lookupCols, join.Table) - allCols := joinOutputMap(input.outputCols, lookupColMap) - + // allExprCols are the columns used in expressions evaluated by this join. + allExprCols := joinOutputMap(input.outputCols, lookupColMap) + allCols := allExprCols + if join.IsFirstJoinInPairedJoiner { + // allCols needs to include the continuation column since it will be + // in the result output by this join. + allCols = allExprCols.Copy() + maxValue, ok := allCols.MaxValue() + if !ok { + return execPlan{}, errors.AssertionFailedf("allCols should not be empty") + } + // Assign the continuation column the next unused value in the map. 
+ allCols.Set(int(join.ContinuationCol), maxValue+1) + } res := execPlan{outputCols: allCols} if join.JoinType == opt.SemiJoinOp || join.JoinType == opt.AntiJoinOp { // For semi and anti join, only the left columns are output. @@ -1763,8 +1778,8 @@ func (b *Builder) buildLookupJoin(join *memo.LookupJoinExpr) (execPlan, error) { } ctx := buildScalarCtx{ - ivh: tree.MakeIndexedVarHelper(nil /* container */, allCols.Len()), - ivarMap: allCols, + ivh: tree.MakeIndexedVarHelper(nil /* container */, allExprCols.Len()), + ivarMap: allExprCols, } var lookupExpr, remoteLookupExpr tree.TypedExpr if len(join.LookupExpr) > 0 { @@ -1809,6 +1824,7 @@ func (b *Builder) buildLookupJoin(join *memo.LookupJoinExpr) (execPlan, error) { remoteLookupExpr, lookupOrdinals, onExpr, + join.IsFirstJoinInPairedJoiner, join.IsSecondJoinInPairedJoiner, res.reqOrdering(join), locking, diff --git a/pkg/sql/opt/exec/execbuilder/testdata/lookup_join b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join index 31b233318fc7..dab38db1f8ae 100644 --- a/pkg/sql/opt/exec/execbuilder/testdata/lookup_join +++ b/pkg/sql/opt/exec/execbuilder/testdata/lookup_join @@ -706,8 +706,6 @@ vectorized: true spans: FULL SCAN # Left join with ON filter on non-covering index -# TODO(radu): this doesn't use lookup join yet, the current rules don't cover -# left join with ON condition on columns that are not covered by the index. query T EXPLAIN (VERBOSE) SELECT small.c, large.d FROM small LEFT JOIN large ON small.c = large.b AND large.d < 30 ---- @@ -718,27 +716,88 @@ vectorized: true │ columns: (c, d) │ estimated row count: 336 │ -└── • hash join (right outer) - │ columns: (b, d, c) +└── • project + │ columns: (c, b, d) │ estimated row count: 336 - │ equality: (b) = (c) │ - ├── • filter - │ │ columns: (b, d) - │ │ estimated row count: 3,303 - │ │ filter: d < 30 - │ │ - │ └── • scan - │ columns: (b, d) - │ estimated row count: 10,000 (100% of the table; stats collected ago) - │ table: large@large_pkey - │ spans: FULL SCAN + └── • lookup join (left outer) + │ columns: (c, a, b, cont, d) + │ table: large@large_pkey + │ equality: (a, b) = (a,b) + │ equality cols are key + │ pred: d < 30 + │ + └── • lookup join (left outer) + │ columns: (c, a, b, cont) + │ estimated row count: 1,000 + │ table: large@bc + │ equality: (c) = (b) + │ + └── • scan + columns: (c) + estimated row count: 100 (100% of the table; stats collected ago) + table: small@small_pkey + spans: FULL SCAN + +# Left semi-join with ON filter on non-covering index +query T +EXPLAIN (VERBOSE) SELECT small.c FROM small WHERE EXISTS(SELECT 1 FROM large WHERE small.c = large.b AND large.d < 30) +---- +distribution: full +vectorized: true +· +• project +│ columns: (c) +│ estimated row count: 100 +│ +└── • lookup join (semi) + │ columns: (c, a, b, cont) + │ table: large@large_pkey + │ equality: (a, b) = (a,b) + │ equality cols are key + │ pred: d < 30 │ - └── • scan - columns: (c) - estimated row count: 100 (100% of the table; stats collected ago) - table: small@small_pkey - spans: FULL SCAN + └── • lookup join (inner) + │ columns: (c, a, b, cont) + │ estimated row count: 990 + │ table: large@bc + │ equality: (c) = (b) + │ + └── • scan + columns: (c) + estimated row count: 100 (100% of the table; stats collected ago) + table: small@small_pkey + spans: FULL SCAN + +# Left anti-join with ON filter on non-covering index +query T +EXPLAIN (VERBOSE) SELECT small.c FROM small WHERE NOT EXISTS(SELECT 1 FROM large WHERE small.c = large.b AND large.d < 30) +---- +distribution: full +vectorized: true +· +• 
project +│ columns: (c) +│ estimated row count: 0 +│ +└── • lookup join (anti) + │ columns: (c, a, b, cont) + │ table: large@large_pkey + │ equality: (a, b) = (a,b) + │ equality cols are key + │ pred: d < 30 + │ + └── • lookup join (left outer) + │ columns: (c, a, b, cont) + │ estimated row count: 1,000 + │ table: large@bc + │ equality: (c) = (b) + │ + └── • scan + columns: (c) + estimated row count: 100 (100% of the table; stats collected ago) + table: small@small_pkey + spans: FULL SCAN ########################################################### # LOOKUP JOINS ON IMPLICIT INDEX KEY COLUMNS # diff --git a/pkg/sql/opt/exec/explain/result_columns.go b/pkg/sql/opt/exec/explain/result_columns.go index 5ae050ba4f8d..5c2a15104d3a 100644 --- a/pkg/sql/opt/exec/explain/result_columns.go +++ b/pkg/sql/opt/exec/explain/result_columns.go @@ -81,7 +81,12 @@ func getResultColumns( case lookupJoinOp: a := args.(*lookupJoinArgs) - return joinColumns(a.JoinType, inputs[0], tableColumns(a.Table, a.LookupCols)), nil + cols := joinColumns(a.JoinType, inputs[0], tableColumns(a.Table, a.LookupCols)) + // The following matches the behavior of execFactory.ConstructLookupJoin. + if a.IsFirstJoinInPairedJoiner { + cols = append(cols, colinfo.ResultColumn{Name: "cont", Typ: types.Bool}) + } + return cols, nil case ordinalityOp: return appendColumns(inputs[0], colinfo.ResultColumn{ diff --git a/pkg/sql/opt/exec/factory.opt b/pkg/sql/opt/exec/factory.opt index ac3a7173c3fb..fae22c98da61 100644 --- a/pkg/sql/opt/exec/factory.opt +++ b/pkg/sql/opt/exec/factory.opt @@ -280,6 +280,7 @@ define LookupJoin { RemoteLookupExpr tree.TypedExpr LookupCols exec.TableColumnOrdinalSet OnCond tree.TypedExpr + IsFirstJoinInPairedJoiner bool IsSecondJoinInPairedJoiner bool ReqOrdering exec.OutputOrdering Locking *tree.LockingItem diff --git a/pkg/sql/opt/memo/check_expr.go b/pkg/sql/opt/memo/check_expr.go index b38368c5d135..14d72e9c8a0d 100644 --- a/pkg/sql/opt/memo/check_expr.go +++ b/pkg/sql/opt/memo/check_expr.go @@ -234,15 +234,33 @@ func (m *Memo) CheckExpr(e opt.Expr) { if !t.Cols.SubsetOf(requiredCols) { panic(errors.AssertionFailedf("lookup join with columns that are not required")) } - if t.IsSecondJoinInPairedJoiner { - ij, ok := t.Input.(*InvertedJoinExpr) - if !ok { + if t.IsFirstJoinInPairedJoiner { + switch t.JoinType { + case opt.InnerJoinOp, opt.LeftJoinOp: + default: panic(errors.AssertionFailedf( - "lookup paired-join is paired with %T instead of inverted join", t.Input)) + "first join in paired joiner must be an inner or left join. 
found %s", + t.JoinType.String(), + )) } - if !ij.IsFirstJoinInPairedJoiner { - panic(errors.AssertionFailedf( - "lookup paired-join is paired with inverted join that thinks it is unpaired")) + if t.ContinuationCol == 0 { + panic(errors.AssertionFailedf("first join in paired joiner must have a continuation column")) + } + } + if t.IsSecondJoinInPairedJoiner { + switch firstJoin := t.Input.(type) { + case *InvertedJoinExpr: + if !firstJoin.IsFirstJoinInPairedJoiner { + panic(errors.AssertionFailedf( + "lookup paired-join is paired with inverted join that thinks it is unpaired")) + } + case *LookupJoinExpr: + if !firstJoin.IsFirstJoinInPairedJoiner { + panic(errors.AssertionFailedf( + "lookup paired-join is paired with lookup join that thinks it is unpaired")) + } + default: + panic(errors.AssertionFailedf("lookup paired-join is paired with %T", t.Input)) } } diff --git a/pkg/sql/opt/memo/expr_format.go b/pkg/sql/opt/memo/expr_format.go index 607954338846..838c4cb7e7b1 100644 --- a/pkg/sql/opt/memo/expr_format.go +++ b/pkg/sql/opt/memo/expr_format.go @@ -525,6 +525,9 @@ func (f *ExprFmtCtx) formatRelational(e RelExpr, tp treeprinter.Node) { if t.LookupColsAreTableKey { tp.Childf("lookup columns are key") } + if t.IsFirstJoinInPairedJoiner { + f.formatColList(e, tp, "first join in paired joiner; continuation column:", opt.ColList{t.ContinuationCol}) + } if t.IsSecondJoinInPairedJoiner { tp.Childf("second join in paired joiner") } diff --git a/pkg/sql/opt/ops/relational.opt b/pkg/sql/opt/ops/relational.opt index 630aa855f3ce..74e7957d7b4c 100644 --- a/pkg/sql/opt/ops/relational.opt +++ b/pkg/sql/opt/ops/relational.opt @@ -402,10 +402,22 @@ define LookupJoinPrivate { # table (and thus each left row matches with at most one table row). LookupColsAreTableKey bool - # IsSecondJoinInPairedJoiner is true if this is the second join of a - # paired-joiner used for left joins. + # At most one of Is{First,Second}JoinInPairedJoiner can be true. + # + # IsFirstJoinInPairedJoiner is true if this is the first (i.e., lower in the + # plan tree) join of a paired-joiner used for left joins. + IsFirstJoinInPairedJoiner bool + + # IsSecondJoinInPairedJoiner is true if this is the second (i.e., higher in + # the plan tree) join of a paired-joiner used for left joins. IsSecondJoinInPairedJoiner bool + # ContinuationCol is the column ID of the continuation column when + # IsFirstJoinInPairedJoiner is true. The continuation column is a boolean + # column that indicates whether an output row is a continuation of a group + # corresponding to a single left input row. + ContinuationCol ColumnID + # LocalityOptimized is true if this lookup join is part of a locality # optimized search strategy. For semi, inner, and left joins, this means # that RemoteLookupExpr will be non-nil. See comments above that field for diff --git a/pkg/sql/opt/xform/join_funcs.go b/pkg/sql/opt/xform/join_funcs.go index 24199fec20af..9961af400a90 100644 --- a/pkg/sql/opt/xform/join_funcs.go +++ b/pkg/sql/opt/xform/join_funcs.go @@ -158,10 +158,12 @@ func (c *CustomFuncs) GenerateMergeJoins( // Input Scan(t) Input // // -// 2. The index is not covering. We have to generate an index join above the -// lookup join. Note that this index join is also implemented as a -// LookupJoin, because an IndexJoin can only output columns from one table, -// whereas we also need to output columns from Input. +// 2. The index is not covering, but we can fully evaluate the ON condition +// using the index, or we are doing an InnerJoin. 
We have to generate +// an index join above the lookup join. Note that this index join is also +// implemented as a LookupJoin, because an IndexJoin can only output +// columns from one table, whereas we also need to output columns from +// Input. // // Join LookupJoin(t@primary) // / \ | @@ -178,13 +180,31 @@ func (c *CustomFuncs) GenerateMergeJoins( // // We want to first join abc with the index on y (which provides columns y, x) // and then use a lookup join to retrieve column z. The "index join" (top -// LookupJoin) will produce columns a,b,c,x,y; the lookup columns are just z -// (the original index join produced x,y,z). +// LookupJoin) will produce columns a,b,c,x,y,z; the lookup columns are just z +// (the original lookup join produced a,b,c,x,y). // // Note that the top LookupJoin "sees" column IDs from the table on both // "sides" (in this example x,y on the left and z on the right) but there is // no overlap. // +// 3. The index is not covering and we cannot fully evaluate the ON condition +// using the index, and we are doing a LeftJoin/SemiJoin/AntiJoin. This is +// handled using a lower-upper pair of joins that are further specialized +// as paired-joins. The first (lower) join outputs a continuation column +// that is used by the second (upper) join. Like case 2, both are lookup +// joins, but paired-joins explicitly know their role in the pair and +// behave accordingly. +// +// For example, using the same tables in the example for case 2: +// SELECT * FROM abc LEFT JOIN xyz ON a=y AND b=z +// +// The first join will evaluate a=y and produce columns a,b,c,x,y,cont +// where cont is the continuation column used to group together rows that +// correspond to the same original a,b,c. The second join will fetch z from +// the primary index, evaluate b=z, and produce columns a,b,c,x,y,z. A +// similar approach works for anti-joins and semi-joins. +// +// // A lookup join can be created when the ON condition or implicit filters from // CHECK constraints and computed columns constrain a prefix of the index // columns to non-ranging constant values. To support this, the constant values @@ -585,21 +605,27 @@ func (c *CustomFuncs) generateLookupJoinsImpl( } } - // All code that follows is for case 2 (see function comment). + // All code that follows is for cases 2 and 3 (see function comment). + // We need to generate two joins: a lower join followed by an upper join. + // In case 3, this lower-upper pair of joins is further specialized into + // paired-joins where we refer to the lower as first and upper as second. if scanPrivate.Flags.NoIndexJoin { return } - if joinType == opt.SemiJoinOp || joinType == opt.AntiJoinOp { - // We cannot use a non-covering index for semi and anti join. Note that - // since the semi/anti join doesn't pass through any columns, "non - // covering" here means that not all columns in the ON condition are - // available. - // - // TODO(radu): We could create a semi/anti join on top of an inner join if - // the lookup columns form a key (to guarantee that input rows are not - // duplicated by the inner join). - return + pairedJoins := false + continuationCol := opt.ColumnID(0) + lowerJoinType := joinType + if joinType == opt.SemiJoinOp { + // Case 3: Semi joins are converted to a pair consisting of an inner + // lookup join and semi lookup join. + pairedJoins = true + lowerJoinType = opt.InnerJoinOp + } else if joinType == opt.AntiJoinOp { + // Case 3: Anti joins are converted to a pair consisting of a left + // lookup join and anti lookup join. 
+ pairedJoins = true + lowerJoinType = opt.LeftJoinOp } if pkCols == nil { @@ -619,6 +645,7 @@ func (c *CustomFuncs) generateLookupJoinsImpl( // can refer to: input columns, or columns available in the index. onCols := indexCols.Union(inputProps.OutputCols) if c.FiltersBoundBy(lookupJoin.On, onCols) { + // Case 2. // The ON condition refers only to the columns available in the index. // // For LeftJoin, both LookupJoins perform a LeftJoin. A null-extended row @@ -632,21 +659,33 @@ func (c *CustomFuncs) generateLookupJoinsImpl( // conditions that refer to other columns. We can put the former in the // lower LookupJoin and the latter in the index join. // - // This works for InnerJoin but not for LeftJoin because of a - // technicality: if an input (left) row has matches in the lower - // LookupJoin but has no matches in the index join, only the columns - // looked up by the top index join get NULL-extended. + // This works in a straightforward manner for InnerJoin but not for + // LeftJoin because of a technicality: if an input (left) row has + // matches in the lower LookupJoin but has no matches in the index join, + // only the columns looked up by the top index join get NULL-extended. + // Additionally if none of the lower matches are matches in the index + // join, we want to output only one NULL-extended row. To accomplish + // this, we need to use paired-joins. if joinType == opt.LeftJoinOp { - // TODO(radu): support LeftJoin, perhaps by looking up all columns and - // discarding columns that are already available from the lower - // LookupJoin. This requires a projection to avoid having the same - // ColumnIDs on both sides of the index join. - return + // Case 3. + pairedJoins = true + // The lowerJoinType continues to be LeftJoinOp. } + // We have already set pairedJoins=true for SemiJoin, AntiJoin earlier, + // and we don't need to do that for InnerJoin. The following sets up the + // ON conditions for both Case 2 and Case 3, when doing 2 joins that + // will each evaluate part of the ON condition. conditions := lookupJoin.On lookupJoin.On = c.ExtractBoundConditions(conditions, onCols) indexJoin.On = c.ExtractUnboundConditions(conditions, onCols) } + if pairedJoins { + lookupJoin.JoinType = lowerJoinType + continuationCol = c.constructContinuationColumnForPairedJoin() + lookupJoin.IsFirstJoinInPairedJoiner = true + lookupJoin.ContinuationCol = continuationCol + lookupJoin.Cols.Add(continuationCol) + } indexJoin.Input = c.e.f.ConstructLookupJoin( lookupJoin.Input, @@ -659,6 +698,15 @@ func (c *CustomFuncs) generateLookupJoinsImpl( indexJoin.KeyCols = pkCols indexJoin.Cols = rightCols.Union(inputProps.OutputCols) indexJoin.LookupColsAreTableKey = true + if pairedJoins { + indexJoin.IsSecondJoinInPairedJoiner = true + } + + // If this is a semi- or anti-join, ensure the columns do not include any + // unneeded right-side columns. + if joinType == opt.SemiJoinOp || joinType == opt.AntiJoinOp { + indexJoin.Cols = inputProps.OutputCols.Union(indexJoin.On.OuterCols()) + } // Create the LookupJoin for the index join in the same group. 
c.e.mem.AddLookupJoinToGroup(&indexJoin, grp) diff --git a/pkg/sql/opt/xform/testdata/external/liquibase b/pkg/sql/opt/xform/testdata/external/liquibase index a9881318772b..0b6e4bb287c5 100644 --- a/pkg/sql/opt/xform/testdata/external/liquibase +++ b/pkg/sql/opt/xform/testdata/external/liquibase @@ -204,66 +204,66 @@ project │ │ │ │ │ ├── lookup columns are key │ │ │ │ │ ├── key: (1,84) │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (84)-->(85), (1,84)-->(36,37,91,105,106), (105)-->(106), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3) - │ │ │ │ │ ├── right-join (hash) + │ │ │ │ │ ├── left-join (lookup pg_index [as=ind]) │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null t.oid:36 spcname:37 indexrelid:84 indrelid:85 indisclustered:91 ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 + │ │ │ │ │ │ ├── key columns: [84] = [84] + │ │ │ │ │ │ ├── lookup columns are key + │ │ │ │ │ │ ├── second join in paired joiner │ │ │ │ │ │ ├── key: (1,84) │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (84)-->(85), (1,84)-->(36,37,91), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3) - │ │ │ │ │ │ ├── select - │ │ │ │ │ │ │ ├── columns: indexrelid:84!null indrelid:85!null indisclustered:91!null - │ │ │ │ │ │ │ ├── key: (84) - │ │ │ │ │ │ │ ├── fd: ()-->(91), (84)-->(85) - │ │ │ │ │ │ │ ├── scan pg_index [as=ind] - │ │ │ │ │ │ │ │ ├── columns: indexrelid:84!null indrelid:85!null indisclustered:91!null - │ │ │ │ │ │ │ │ ├── key: (84) - │ │ │ │ │ │ │ │ └── fd: (84)-->(85,91) - │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ └── indisclustered:91 [outer=(91), constraints=(/91: [/true - /true]; tight), fd=()-->(91)] - │ │ │ │ │ │ ├── left-join (lookup pg_tablespace [as=t]) - │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null t.oid:36 spcname:37 ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 - │ │ │ │ │ │ │ ├── key columns: [8] = [36] - │ │ │ │ │ │ │ ├── lookup columns are key - │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,36,37,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3) - │ │ │ │ │ │ │ ├── left-join (lookup pg_foreign_server [as=fs]) - │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 - │ │ │ │ │ │ │ │ ├── key columns: [135] = [139] + │ │ 
│ │ │ │ ├── left-join (lookup pg_index@pg_index_indrelid_index [as=ind]) + │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null t.oid:36 spcname:37 indexrelid:84 indrelid:85 ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 continuation:239 + │ │ │ │ │ │ │ ├── key columns: [1] = [85] + │ │ │ │ │ │ │ ├── first join in paired joiner; continuation column: continuation:239 + │ │ │ │ │ │ │ ├── key: (1,84) + │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,36,37,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3), (84)-->(85,239) + │ │ │ │ │ │ │ ├── left-join (lookup pg_tablespace [as=t]) + │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null t.oid:36 spcname:37 ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 + │ │ │ │ │ │ │ │ ├── key columns: [8] = [36] │ │ │ │ │ │ │ │ ├── lookup columns are key │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (3)==(30), (30)==(3) - │ │ │ │ │ │ │ │ ├── left-join (lookup pg_foreign_table [as=ft]) - │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null ftrelid:134 ftserver:135 ftoptions:136 - │ │ │ │ │ │ │ │ │ ├── key columns: [1] = [134] + │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,36,37,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3) + │ │ │ │ │ │ │ │ ├── left-join (lookup pg_foreign_server [as=fs]) + │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 + │ │ │ │ │ │ │ │ │ ├── key columns: [135] = [139] │ │ │ │ │ │ │ │ │ ├── lookup columns are key │ │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136), (3)==(30), (30)==(3) - │ │ │ │ │ │ │ │ │ ├── inner-join (hash) - │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null 
c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null - │ │ │ │ │ │ │ │ │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + │ │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (3)==(30), (30)==(3) + │ │ │ │ │ │ │ │ │ ├── left-join (lookup pg_foreign_table [as=ft]) + │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null ftrelid:134 ftserver:135 ftoptions:136 + │ │ │ │ │ │ │ │ │ │ ├── key columns: [1] = [134] + │ │ │ │ │ │ │ │ │ │ ├── lookup columns are key │ │ │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (3)==(30), (30)==(3) - │ │ │ │ │ │ │ │ │ │ ├── select - │ │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 + │ │ │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136), (3)==(30), (30)==(3) + │ │ │ │ │ │ │ │ │ │ ├── inner-join (hash) + │ │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null + │ │ │ │ │ │ │ │ │ │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) │ │ │ │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ │ │ │ ├── fd: (1)-->(2,3,5,8,10,13,15,17,20,22,23,26,27), (2,3)-->(1,5,8,10,13,15,17,20,22,23,26,27) - │ │ │ │ │ │ │ │ │ │ │ ├── scan pg_class [as=c] + │ │ │ │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (3)==(30), (30)==(3) + │ │ │ │ │ │ │ │ │ │ │ ├── select │ │ │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 │ │ │ │ │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ │ │ │ │ └── fd: (1)-->(2,3,5,8,10,13,15,17,20,22,23,26,27), (2,3)-->(1,5,8,10,13,15,17,20,22,23,26,27) + │ │ │ │ │ │ │ │ │ │ │ │ ├── fd: (1)-->(2,3,5,8,10,13,15,17,20,22,23,26,27), (2,3)-->(1,5,8,10,13,15,17,20,22,23,26,27) + │ │ │ │ │ │ │ │ │ │ │ │ ├── scan pg_class [as=c] + │ │ │ │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 + │ │ │ │ │ │ │ │ │ │ │ │ │ ├── key: 
(1) + │ │ │ │ │ │ │ │ │ │ │ │ │ └── fd: (1)-->(2,3,5,8,10,13,15,17,20,22,23,26,27), (2,3)-->(1,5,8,10,13,15,17,20,22,23,26,27) + │ │ │ │ │ │ │ │ │ │ │ │ └── filters + │ │ │ │ │ │ │ │ │ │ │ │ └── (c.relkind:17 = 'r') OR (c.relkind:17 = 'f') [outer=(17), constraints=(/17: [/'f' - /'f'] [/'r' - /'r']; tight)] + │ │ │ │ │ │ │ │ │ │ │ ├── scan pg_namespace@pg_namespace_nspname_index [as=n] + │ │ │ │ │ │ │ │ │ │ │ │ ├── columns: n.oid:30!null n.nspname:31!null + │ │ │ │ │ │ │ │ │ │ │ │ ├── constraint: /31: [/'public' - /'public'] + │ │ │ │ │ │ │ │ │ │ │ │ ├── cardinality: [0 - 1] + │ │ │ │ │ │ │ │ │ │ │ │ ├── key: () + │ │ │ │ │ │ │ │ │ │ │ │ └── fd: ()-->(30,31) │ │ │ │ │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ │ │ │ │ └── (c.relkind:17 = 'r') OR (c.relkind:17 = 'f') [outer=(17), constraints=(/17: [/'f' - /'f'] [/'r' - /'r']; tight)] - │ │ │ │ │ │ │ │ │ │ ├── scan pg_namespace@pg_namespace_nspname_index [as=n] - │ │ │ │ │ │ │ │ │ │ │ ├── columns: n.oid:30!null n.nspname:31!null - │ │ │ │ │ │ │ │ │ │ │ ├── constraint: /31: [/'public' - /'public'] - │ │ │ │ │ │ │ │ │ │ │ ├── cardinality: [0 - 1] - │ │ │ │ │ │ │ │ │ │ │ ├── key: () - │ │ │ │ │ │ │ │ │ │ │ └── fd: ()-->(30,31) - │ │ │ │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ │ │ │ └── n.oid:30 = c.relnamespace:3 [outer=(3,30), constraints=(/3: (/NULL - ]; /30: (/NULL - ]), fd=(3)==(30), (30)==(3)] + │ │ │ │ │ │ │ │ │ │ │ └── n.oid:30 = c.relnamespace:3 [outer=(3,30), constraints=(/3: (/NULL - ]; /30: (/NULL - ]), fd=(3)==(30), (30)==(3)] + │ │ │ │ │ │ │ │ │ │ └── filters (true) │ │ │ │ │ │ │ │ │ └── filters (true) │ │ │ │ │ │ │ │ └── filters (true) │ │ │ │ │ │ │ └── filters (true) │ │ │ │ │ │ └── filters - │ │ │ │ │ │ └── indrelid:85 = c.oid:1 [outer=(1,85), constraints=(/1: (/NULL - ]; /85: (/NULL - ]), fd=(1)==(85), (85)==(1)] + │ │ │ │ │ │ └── indisclustered:91 [outer=(91), constraints=(/91: [/true - /true]; tight), fd=()-->(91)] │ │ │ │ │ └── filters (true) │ │ │ │ └── filters │ │ │ │ └── i.inhrelid:44 = c.oid:1 [outer=(1,44), constraints=(/1: (/NULL - ]; /44: (/NULL - ]), fd=(1)==(44), (44)==(1)] diff --git a/pkg/sql/opt/xform/testdata/external/navicat b/pkg/sql/opt/xform/testdata/external/navicat index 2fdef7864097..16d52516f27a 100644 --- a/pkg/sql/opt/xform/testdata/external/navicat +++ b/pkg/sql/opt/xform/testdata/external/navicat @@ -208,66 +208,66 @@ sort │ │ │ │ │ ├── lookup columns are key │ │ │ │ │ ├── key: (1,84) │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (84)-->(85), (1,84)-->(36,37,91,105,106), (105)-->(106), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3) - │ │ │ │ │ ├── right-join (hash) + │ │ │ │ │ ├── left-join (lookup pg_index [as=ind]) │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null t.oid:36 spcname:37 indexrelid:84 indrelid:85 indisclustered:91 ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 + │ │ │ │ │ │ ├── key columns: [84] = [84] + │ │ │ │ │ │ ├── lookup columns are key + │ │ │ │ │ │ ├── second join in paired joiner │ │ │ │ │ │ ├── key: (1,84) │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), 
(134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (84)-->(85), (1,84)-->(36,37,91), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3) - │ │ │ │ │ │ ├── select - │ │ │ │ │ │ │ ├── columns: indexrelid:84!null indrelid:85!null indisclustered:91!null - │ │ │ │ │ │ │ ├── key: (84) - │ │ │ │ │ │ │ ├── fd: ()-->(91), (84)-->(85) - │ │ │ │ │ │ │ ├── scan pg_index [as=ind] - │ │ │ │ │ │ │ │ ├── columns: indexrelid:84!null indrelid:85!null indisclustered:91!null - │ │ │ │ │ │ │ │ ├── key: (84) - │ │ │ │ │ │ │ │ └── fd: (84)-->(85,91) - │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ └── indisclustered:91 [outer=(91), constraints=(/91: [/true - /true]; tight), fd=()-->(91)] - │ │ │ │ │ │ ├── left-join (lookup pg_tablespace [as=t]) - │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null t.oid:36 spcname:37 ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 - │ │ │ │ │ │ │ ├── key columns: [8] = [36] - │ │ │ │ │ │ │ ├── lookup columns are key - │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,36,37,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3) - │ │ │ │ │ │ │ ├── left-join (lookup pg_foreign_server [as=fs]) - │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 - │ │ │ │ │ │ │ │ ├── key columns: [135] = [139] + │ │ │ │ │ │ ├── left-join (lookup pg_index@pg_index_indrelid_index [as=ind]) + │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null t.oid:36 spcname:37 indexrelid:84 indrelid:85 ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 continuation:239 + │ │ │ │ │ │ │ ├── key columns: [1] = [85] + │ │ │ │ │ │ │ ├── first join in paired joiner; continuation column: continuation:239 + │ │ │ │ │ │ │ ├── key: (1,84) + │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,36,37,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3), (84)-->(85,239) + │ │ │ │ │ │ │ ├── left-join (lookup pg_tablespace [as=t]) + │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null t.oid:36 spcname:37 ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 + │ │ │ │ │ │ │ │ ├── key columns: [8] = [36] │ │ │ │ │ │ │ │ ├── lookup columns 
are key │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (3)==(30), (30)==(3) - │ │ │ │ │ │ │ │ ├── left-join (lookup pg_foreign_table [as=ft]) - │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null ftrelid:134 ftserver:135 ftoptions:136 - │ │ │ │ │ │ │ │ │ ├── key columns: [1] = [134] + │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,36,37,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (36)-->(37), (37)-->(36), (3)==(30), (30)==(3) + │ │ │ │ │ │ │ │ ├── left-join (lookup pg_foreign_server [as=fs]) + │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null ftrelid:134 ftserver:135 ftoptions:136 fs.oid:139 srvname:140 + │ │ │ │ │ │ │ │ │ ├── key columns: [135] = [139] │ │ │ │ │ │ │ │ │ ├── lookup columns are key │ │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136), (3)==(30), (30)==(3) - │ │ │ │ │ │ │ │ │ ├── inner-join (hash) - │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null - │ │ │ │ │ │ │ │ │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) + │ │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136,139,140), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136,139,140), (139)~~>(140), (140)~~>(139), (3)==(30), (30)==(3) + │ │ │ │ │ │ │ │ │ ├── left-join (lookup pg_foreign_table [as=ft]) + │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null ftrelid:134 ftserver:135 ftoptions:136 + │ │ │ │ │ │ │ │ │ │ ├── key columns: [1] = [134] + │ │ │ │ │ │ │ │ │ │ ├── lookup columns are key │ │ │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (3)==(30), (30)==(3) - │ │ │ │ │ │ │ │ │ │ ├── select - │ │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 + │ │ │ │ │ 
│ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27,134-136), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (134)-->(135,136), (3)==(30), (30)==(3) + │ │ │ │ │ │ │ │ │ │ ├── inner-join (hash) + │ │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 n.oid:30!null n.nspname:31!null + │ │ │ │ │ │ │ │ │ │ │ ├── multiplicity: left-rows(zero-or-one), right-rows(zero-or-more) │ │ │ │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ │ │ │ ├── fd: (1)-->(2,3,5,8,10,13,15,17,20,22,23,26,27), (2,3)-->(1,5,8,10,13,15,17,20,22,23,26,27) - │ │ │ │ │ │ │ │ │ │ │ ├── scan pg_class [as=c] + │ │ │ │ │ │ │ │ │ │ │ ├── fd: ()-->(3,30,31), (1)-->(2,5,8,10,13,15,17,20,22,23,26,27), (2)-->(1,5,8,10,13,15,17,20,22,23,26,27), (3)==(30), (30)==(3) + │ │ │ │ │ │ │ │ │ │ │ ├── select │ │ │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 │ │ │ │ │ │ │ │ │ │ │ │ ├── key: (1) - │ │ │ │ │ │ │ │ │ │ │ │ └── fd: (1)-->(2,3,5,8,10,13,15,17,20,22,23,26,27), (2,3)-->(1,5,8,10,13,15,17,20,22,23,26,27) + │ │ │ │ │ │ │ │ │ │ │ │ ├── fd: (1)-->(2,3,5,8,10,13,15,17,20,22,23,26,27), (2,3)-->(1,5,8,10,13,15,17,20,22,23,26,27) + │ │ │ │ │ │ │ │ │ │ │ │ ├── scan pg_class [as=c] + │ │ │ │ │ │ │ │ │ │ │ │ │ ├── columns: c.oid:1!null c.relname:2!null c.relnamespace:3!null c.relowner:5!null c.reltablespace:8!null c.reltuples:10!null c.relhasindex:13!null c.relpersistence:15!null c.relkind:17!null c.relhasoids:20!null c.relhasrules:22!null c.relhastriggers:23!null c.relacl:26 c.reloptions:27 + │ │ │ │ │ │ │ │ │ │ │ │ │ ├── key: (1) + │ │ │ │ │ │ │ │ │ │ │ │ │ └── fd: (1)-->(2,3,5,8,10,13,15,17,20,22,23,26,27), (2,3)-->(1,5,8,10,13,15,17,20,22,23,26,27) + │ │ │ │ │ │ │ │ │ │ │ │ └── filters + │ │ │ │ │ │ │ │ │ │ │ │ └── (c.relkind:17 = 'r') OR (c.relkind:17 = 'f') [outer=(17), constraints=(/17: [/'f' - /'f'] [/'r' - /'r']; tight)] + │ │ │ │ │ │ │ │ │ │ │ ├── scan pg_namespace@pg_namespace_nspname_index [as=n] + │ │ │ │ │ │ │ │ │ │ │ │ ├── columns: n.oid:30!null n.nspname:31!null + │ │ │ │ │ │ │ │ │ │ │ │ ├── constraint: /31: [/'public' - /'public'] + │ │ │ │ │ │ │ │ │ │ │ │ ├── cardinality: [0 - 1] + │ │ │ │ │ │ │ │ │ │ │ │ ├── key: () + │ │ │ │ │ │ │ │ │ │ │ │ └── fd: ()-->(30,31) │ │ │ │ │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ │ │ │ │ └── (c.relkind:17 = 'r') OR (c.relkind:17 = 'f') [outer=(17), constraints=(/17: [/'f' - /'f'] [/'r' - /'r']; tight)] - │ │ │ │ │ │ │ │ │ │ ├── scan pg_namespace@pg_namespace_nspname_index [as=n] - │ │ │ │ │ │ │ │ │ │ │ ├── columns: n.oid:30!null n.nspname:31!null - │ │ │ │ │ │ │ │ │ │ │ ├── constraint: /31: [/'public' - /'public'] - │ │ │ │ │ │ │ │ │ │ │ ├── cardinality: [0 - 1] - │ │ │ │ │ │ │ │ │ │ │ ├── key: () - │ │ │ │ │ │ │ │ │ │ │ └── fd: ()-->(30,31) - │ │ │ │ │ │ │ │ │ │ └── filters - │ │ │ │ │ │ │ │ │ │ └── n.oid:30 = c.relnamespace:3 [outer=(3,30), constraints=(/3: (/NULL - ]; /30: (/NULL - ]), fd=(3)==(30), (30)==(3)] + │ │ │ │ │ │ │ │ │ │ │ └── n.oid:30 = c.relnamespace:3 [outer=(3,30), constraints=(/3: (/NULL - ]; /30: (/NULL - ]), fd=(3)==(30), (30)==(3)] + │ │ │ │ │ │ │ │ │ │ 
└── filters (true) │ │ │ │ │ │ │ │ │ └── filters (true) │ │ │ │ │ │ │ │ └── filters (true) │ │ │ │ │ │ │ └── filters (true) │ │ │ │ │ │ └── filters - │ │ │ │ │ │ └── indrelid:85 = c.oid:1 [outer=(1,85), constraints=(/1: (/NULL - ]; /85: (/NULL - ]), fd=(1)==(85), (85)==(1)] + │ │ │ │ │ │ └── indisclustered:91 [outer=(91), constraints=(/91: [/true - /true]; tight), fd=()-->(91)] │ │ │ │ │ └── filters (true) │ │ │ │ └── filters │ │ │ │ └── i.inhrelid:44 = c.oid:1 [outer=(1,44), constraints=(/1: (/NULL - ]; /44: (/NULL - ]), fd=(1)==(44), (44)==(1)] diff --git a/pkg/sql/opt/xform/testdata/rules/join b/pkg/sql/opt/xform/testdata/rules/join index 4c1e6bb55266..85d1b6c6e5f2 100644 --- a/pkg/sql/opt/xform/testdata/rules/join +++ b/pkg/sql/opt/xform/testdata/rules/join @@ -2324,19 +2324,24 @@ inner-join (lookup abcd) └── c:8 > n:2 [outer=(2,8), constraints=(/2: (/NULL - ]; /8: (/NULL - ])] # Non-covering case, extra filter not bound by index, left join. -# In this case, we can't yet convert to a lookup join (see -# the GenerateLookupJoins custom func). -opt expect-not=GenerateLookupJoins +# In this case, we can generate lookup joins as paired-joins. +opt expect=GenerateLookupJoins SELECT * FROM small LEFT JOIN abcd ON a=m AND c>n ---- -right-join (hash) +left-join (lookup abcd) ├── columns: m:1 n:2 a:6 b:7 c:8 - ├── scan abcd - │ └── columns: a:6 b:7 c:8 - ├── scan small - │ └── columns: m:1 n:2 + ├── key columns: [9] = [9] + ├── lookup columns are key + ├── second join in paired joiner + ├── left-join (lookup abcd@abcd_a_b_idx) + │ ├── columns: m:1 n:2 a:6 b:7 abcd.rowid:9 continuation:12 + │ ├── key columns: [1] = [6] + │ ├── first join in paired joiner; continuation column: continuation:12 + │ ├── fd: (9)-->(6,7,12) + │ ├── scan small + │ │ └── columns: m:1 n:2 + │ └── filters (true) └── filters - ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] └── c:8 > n:2 [outer=(2,8), constraints=(/2: (/NULL - ]; /8: (/NULL - ])] @@ -2583,19 +2588,25 @@ semi-join (lookup abcd@abcd_a_b_idx) │ └── columns: m:1 n:2 └── filters (true) -# We should not generate a lookup semi-join when the index doesn't contain all -# columns in the join condition. -opt expect-not=GenerateLookupJoins +# We can generate a lookup semi-join when the index doesn't contain all +# columns in the join condition, using paired-joins. +opt expect=GenerateLookupJoins SELECT m, n FROM small WHERE EXISTS(SELECT 1 FROM abcd WHERE m = a AND n = c) ---- -semi-join (hash) +semi-join (lookup abcd) ├── columns: m:1 n:2 - ├── scan small - │ └── columns: m:1 n:2 - ├── scan abcd - │ └── columns: a:6 c:8 + ├── key columns: [9] = [9] + ├── lookup columns are key + ├── second join in paired joiner + ├── inner-join (lookup abcd@abcd_a_b_idx) + │ ├── columns: m:1!null n:2 a:6!null abcd.rowid:9!null continuation:13 + │ ├── key columns: [1] = [6] + │ ├── first join in paired joiner; continuation column: continuation:13 + │ ├── fd: (9)-->(6,13), (1)==(6), (6)==(1) + │ ├── scan small + │ │ └── columns: m:1 n:2 + │ └── filters (true) └── filters - ├── m:1 = a:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] └── n:2 = c:8 [outer=(2,8), constraints=(/2: (/NULL - ]; /8: (/NULL - ]), fd=(2)==(8), (8)==(2)] # Lookup anti-join with index that contains all columns in the join condition. 
@@ -2609,19 +2620,25 @@ anti-join (lookup abcd@abcd_a_b_idx) │ └── columns: m:1 n:2 └── filters (true) -# We should not generate a lookup anti-join when the index doesn't contain all -# columns in the join condition. -opt expect-not=GenerateLookupJoins +# We can generate a lookup anti-join when the index doesn't contain all +# columns in the join condition, using paired-joins. +opt expect=GenerateLookupJoins SELECT m, n FROM small WHERE NOT EXISTS(SELECT 1 FROM abcd WHERE m = a AND n = c) ---- -anti-join (hash) +anti-join (lookup abcd) ├── columns: m:1 n:2 - ├── scan small - │ └── columns: m:1 n:2 - ├── scan abcd - │ └── columns: a:6 c:8 + ├── key columns: [9] = [9] + ├── lookup columns are key + ├── second join in paired joiner + ├── left-join (lookup abcd@abcd_a_b_idx) + │ ├── columns: m:1 n:2 a:6 abcd.rowid:9 continuation:13 + │ ├── key columns: [1] = [6] + │ ├── first join in paired joiner; continuation column: continuation:13 + │ ├── fd: (9)-->(6,13) + │ ├── scan small + │ │ └── columns: m:1 n:2 + │ └── filters (true) └── filters - ├── m:1 = a:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] └── n:2 = c:8 [outer=(2,8), constraints=(/2: (/NULL - ]; /8: (/NULL - ]), fd=(2)==(8), (8)==(2)] # Regression test for #59615. Ensure that invalid lookup joins are not created @@ -2830,6 +2847,10 @@ left-join (lookup lookup_expr [as=t]) │ └── filters (true) └── filters (true) +exec-ddl +DROP INDEX idx_vrw +---- + # The OR filter gets converted to an IN expression in the lookup expression # filters. # TODO(rytaft): The OR filter shouldn't get re-applied as an additional filter @@ -3066,23 +3087,27 @@ inner-join (lookup abcd) └── c:8 > n:2 [outer=(2,8), constraints=(/2: (/NULL - ]; /8: (/NULL - ])] # Non-covering case, extra filter not bound by index, left join. -# In this case, we can't yet convert to a lookup join (see -# the GenerateLookupJoins custom func). -opt expect-not=GenerateLookupJoinsWithFilter +# In this case, we can generate lookup joins as paired-joins. +opt expect=GenerateLookupJoinsWithFilter SELECT * FROM small LEFT JOIN abcd ON a=m AND c>n AND b>1 ---- -right-join (hash) +left-join (lookup abcd) ├── columns: m:1 n:2 a:6 b:7 c:8 - ├── select - │ ├── columns: a:6 b:7!null c:8 - │ ├── scan abcd - │ │ └── columns: a:6 b:7 c:8 - │ └── filters - │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] - ├── scan small - │ └── columns: m:1 n:2 + ├── key columns: [9] = [9] + ├── lookup columns are key + ├── second join in paired joiner + ├── left-join (lookup abcd@abcd_a_b_idx) + │ ├── columns: m:1 n:2 a:6 b:7 abcd.rowid:9 continuation:12 + │ ├── lookup expression + │ │ └── filters + │ │ ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] + │ │ └── b:7 > 1 [outer=(7), constraints=(/7: [/2 - ]; tight)] + │ ├── first join in paired joiner; continuation column: continuation:12 + │ ├── fd: (9)-->(6,7,12) + │ ├── scan small + │ │ └── columns: m:1 n:2 + │ └── filters (true) └── filters - ├── a:6 = m:1 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] └── c:8 > n:2 [outer=(2,8), constraints=(/2: (/NULL - ]; /8: (/NULL - ])] # Constant columns are projected and used by lookup joiner. @@ -3604,22 +3629,26 @@ semi-join (lookup abcd@abcd_a_b_idx) └── filters └── a:6 > b:7 [outer=(6,7), constraints=(/6: (/NULL - ]; /7: (/NULL - ])] -# We should not generate a lookup semi-join when the index is non-covering. 
-opt expect-not=GenerateLookupJoinsWithFilter +# We can generate a lookup semi-join when the index is non-covering using +# paired-joins. +opt expect=GenerateLookupJoinsWithFilter SELECT m, n FROM small WHERE EXISTS(SELECT 1 FROM abcd WHERE m = a AND n = c AND a > b) ---- -semi-join (hash) +semi-join (lookup abcd) ├── columns: m:1 n:2 - ├── scan small - │ └── columns: m:1 n:2 - ├── select - │ ├── columns: a:6!null b:7!null c:8 - │ ├── scan abcd - │ │ └── columns: a:6 b:7 c:8 + ├── key columns: [9] = [9] + ├── lookup columns are key + ├── second join in paired joiner + ├── inner-join (lookup abcd@abcd_a_b_idx) + │ ├── columns: m:1!null n:2 a:6!null b:7!null abcd.rowid:9!null continuation:13 + │ ├── key columns: [1] = [6] + │ ├── first join in paired joiner; continuation column: continuation:13 + │ ├── fd: (9)-->(6,7,13), (1)==(6), (6)==(1) + │ ├── scan small + │ │ └── columns: m:1 n:2 │ └── filters │ └── a:6 > b:7 [outer=(6,7), constraints=(/6: (/NULL - ]; /7: (/NULL - ])] └── filters - ├── m:1 = a:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] └── n:2 = c:8 [outer=(2,8), constraints=(/2: (/NULL - ]; /8: (/NULL - ]), fd=(2)==(8), (8)==(2)] # Lookup anti-join with covering index. @@ -3634,22 +3663,26 @@ anti-join (lookup abcd@abcd_a_b_idx) └── filters └── a:6 > b:7 [outer=(6,7), constraints=(/6: (/NULL - ]; /7: (/NULL - ])] -# We should not generate a lookup semi-join when the index is non-covering. -opt expect-not=GenerateLookupJoinsWithFilter +# We can generate a lookup semi-join when the index is non-covering using +# paired-joins. +opt expect=GenerateLookupJoinsWithFilter SELECT m, n FROM small WHERE NOT EXISTS(SELECT 1 FROM abcd WHERE m = a AND n = c AND a > b) ---- -anti-join (hash) +anti-join (lookup abcd) ├── columns: m:1 n:2 - ├── scan small - │ └── columns: m:1 n:2 - ├── select - │ ├── columns: a:6!null b:7!null c:8 - │ ├── scan abcd - │ │ └── columns: a:6 b:7 c:8 + ├── key columns: [9] = [9] + ├── lookup columns are key + ├── second join in paired joiner + ├── left-join (lookup abcd@abcd_a_b_idx) + │ ├── columns: m:1 n:2 a:6 b:7 abcd.rowid:9 continuation:13 + │ ├── key columns: [1] = [6] + │ ├── first join in paired joiner; continuation column: continuation:13 + │ ├── fd: (9)-->(6,7,13) + │ ├── scan small + │ │ └── columns: m:1 n:2 │ └── filters │ └── a:6 > b:7 [outer=(6,7), constraints=(/6: (/NULL - ]; /7: (/NULL - ])] └── filters - ├── m:1 = a:6 [outer=(1,6), constraints=(/1: (/NULL - ]; /6: (/NULL - ]), fd=(1)==(6), (6)==(1)] └── n:2 = c:8 [outer=(2,8), constraints=(/2: (/NULL - ]; /8: (/NULL - ]), fd=(2)==(8), (8)==(2)] # -------------------------------------------------- @@ -3838,30 +3871,28 @@ project │ └── columns: m:1 n:2 └── filters (true) -# We should not generate a lookup semi-join when the index does not cover "s" -# which is referenced in the remaining filter. -opt expect-not=GenerateLookupJoinsWithFilter +# We can generate a lookup semi-join when the index does not cover "s", +# which is referenced in the remaining filter, by using paired-joins. 
+opt expect=GenerateLookupJoinsWithFilter SELECT m FROM small WHERE EXISTS (SELECT 1 FROM partial_tab WHERE s = 'foo' AND n = i) ---- project ├── columns: m:1 - └── semi-join (hash) + └── semi-join (lookup partial_tab) ├── columns: m:1 n:2 - ├── scan small - │ └── columns: m:1 n:2 - ├── select - │ ├── columns: i:7 s:8!null - │ ├── fd: ()-->(8) - │ ├── index-join partial_tab - │ │ ├── columns: i:7 s:8 - │ │ └── scan partial_tab@partial_idx,partial - │ │ ├── columns: k:6!null i:7 - │ │ ├── key: (6) - │ │ └── fd: (6)-->(7) - │ └── filters - │ └── s:8 = 'foo' [outer=(8), constraints=(/8: [/'foo' - /'foo']; tight), fd=()-->(8)] + ├── key columns: [6] = [6] + ├── lookup columns are key + ├── second join in paired joiner + ├── inner-join (lookup partial_tab@partial_idx,partial) + │ ├── columns: m:1 n:2!null k:6!null i:7!null continuation:12 + │ ├── key columns: [2] = [7] + │ ├── first join in paired joiner; continuation column: continuation:12 + │ ├── fd: (6)-->(7,12), (2)==(7), (7)==(2) + │ ├── scan small + │ │ └── columns: m:1 n:2 + │ └── filters (true) └── filters - └── n:2 = i:7 [outer=(2,7), constraints=(/2: (/NULL - ]; /7: (/NULL - ]), fd=(2)==(7), (7)==(2)] + └── s:8 = 'foo' [outer=(8), constraints=(/8: [/'foo' - /'foo']; tight), fd=()-->(8)] # Generate a lookup anti-join when the index does not cover "s", but the # reference to "s" no longer exists in the filters. @@ -3877,30 +3908,28 @@ project │ └── columns: m:1 n:2 └── filters (true) -# We should not generate a lookup anti-join when the index does not cover "s" -# which is referenced in the remaining filter. -opt expect-not=GenerateLookupJoinsWithFilter +# We can generate a lookup anti-join when the index does not cover "s", +# which is referenced in the remaining filter, by using paired-joins. +opt expect=GenerateLookupJoinsWithFilter SELECT m FROM small WHERE NOT EXISTS (SELECT 1 FROM partial_tab WHERE s = 'foo' AND n = i) ---- project ├── columns: m:1 - └── anti-join (hash) + └── anti-join (lookup partial_tab) ├── columns: m:1 n:2 - ├── scan small - │ └── columns: m:1 n:2 - ├── select - │ ├── columns: i:7 s:8!null - │ ├── fd: ()-->(8) - │ ├── index-join partial_tab - │ │ ├── columns: i:7 s:8 - │ │ └── scan partial_tab@partial_idx,partial - │ │ ├── columns: k:6!null i:7 - │ │ ├── key: (6) - │ │ └── fd: (6)-->(7) - │ └── filters - │ └── s:8 = 'foo' [outer=(8), constraints=(/8: [/'foo' - /'foo']; tight), fd=()-->(8)] + ├── key columns: [6] = [6] + ├── lookup columns are key + ├── second join in paired joiner + ├── left-join (lookup partial_tab@partial_idx,partial) + │ ├── columns: m:1 n:2 k:6 i:7 continuation:12 + │ ├── key columns: [2] = [7] + │ ├── first join in paired joiner; continuation column: continuation:12 + │ ├── fd: (6)-->(7,12) + │ ├── scan small + │ │ └── columns: m:1 n:2 + │ └── filters (true) └── filters - └── n:2 = i:7 [outer=(2,7), constraints=(/2: (/NULL - ]; /7: (/NULL - ]), fd=(2)==(7), (7)==(2)] + └── s:8 = 'foo' [outer=(8), constraints=(/8: [/'foo' - /'foo']; tight), fd=()-->(8)] # A lookup semi-join on a partial index should have the same cost as a lookup # semi-join on a non-partial index. 
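Reviewer note: the rules/join test changes above all follow the paired-joiner pattern for non-covering lookup joins: a first lookup join against the secondary index emits a boolean continuation column (false for the first output row of each input row, true for subsequent rows of the same input row), and a second lookup join against the primary index uses that column to recover left/semi/anti semantics once the remaining filters have been applied. The sketch below is a minimal, self-contained illustration of what the continuation column encodes, assuming the first join was a left join (as in the anti-join plans above) so that every input row surfaces at least once; the pairedRow type and semiAnti helper are invented for the example and are not the actual joinReader code.

```go
package main

import "fmt"

// pairedRow is a hypothetical, simplified stand-in for one output row of the
// "first join in paired joiner": leftKey identifies the original input row,
// match reports whether the second join's lookup and remaining ON filters
// passed for this row, and continuation is false for the first row produced
// for a given input row and true for every later row of that same input row.
type pairedRow struct {
	leftKey      int
	match        bool
	continuation bool
}

// semiAnti sketches how the second paired join can use the continuation
// column: a new group starts whenever continuation is false, and the group's
// input row belongs to the semi-join output if any row in the group matched,
// or to the anti-join output otherwise.
func semiAnti(rows []pairedRow) (semi, anti []int) {
	flush := func(key int, matched bool) {
		if matched {
			semi = append(semi, key)
		} else {
			anti = append(anti, key)
		}
	}
	started := false
	var curKey int
	var curMatched bool
	for _, r := range rows {
		if !r.continuation { // a new input row begins here
			if started {
				flush(curKey, curMatched)
			}
			started, curKey, curMatched = true, r.leftKey, false
		}
		curMatched = curMatched || r.match
	}
	if started {
		flush(curKey, curMatched)
	}
	return semi, anti
}

func main() {
	rows := []pairedRow{
		{leftKey: 1, match: false, continuation: false},
		{leftKey: 1, match: true, continuation: true},   // a later lookup for row 1 matched
		{leftKey: 2, match: false, continuation: false}, // row 2 never matched
	}
	semi, anti := semiAnti(rows)
	fmt.Println(semi, anti) // [1] [2]
}
```

This is also why the plans above use an inner join for the first paired join in the semi case but a left join in the left/anti cases: anti-join needs unmatched input rows to appear at least once so they can be emitted.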
@@ -5346,7 +5375,7 @@ WHERE n.name = 'Upper West Side' OR n.name = 'Upper East Side' GROUP BY n.name, n.geom ---- -memo (optimized, ~33KB, required=[presentation: name:16,popn_per_sqkm:22]) +memo (optimized, ~34KB, required=[presentation: name:16,popn_per_sqkm:22]) ├── G1: (project G2 G3 name) │ └── [presentation: name:16,popn_per_sqkm:22] │ ├── best: (project G2 G3 name) diff --git a/pkg/sql/opt_exec_factory.go b/pkg/sql/opt_exec_factory.go index 701c4fb03cbe..edf4b4f87222 100644 --- a/pkg/sql/opt_exec_factory.go +++ b/pkg/sql/opt_exec_factory.go @@ -638,6 +638,7 @@ func (ef *execFactory) ConstructLookupJoin( remoteLookupExpr tree.TypedExpr, lookupCols exec.TableColumnOrdinalSet, onCond tree.TypedExpr, + isFirstJoinInPairedJoiner bool, isSecondJoinInPairedJoiner bool, reqOrdering exec.OutputOrdering, locking *tree.LockingItem, @@ -674,6 +675,7 @@ func (ef *execFactory) ConstructLookupJoin( table: tableScan, joinType: joinType, eqColsAreKey: eqColsAreKey, + isFirstJoinInPairedJoiner: isFirstJoinInPairedJoiner, isSecondJoinInPairedJoiner: isSecondJoinInPairedJoiner, reqOrdering: ReqOrdering(reqOrdering), } @@ -692,6 +694,9 @@ func (ef *execFactory) ConstructLookupJoin( n.onCond = pred.iVarHelper.Rebind(onCond) } n.columns = pred.cols + if isFirstJoinInPairedJoiner { + n.columns = append(n.columns, colinfo.ResultColumn{Name: "cont", Typ: types.Bool}) + } return n, nil } diff --git a/pkg/sql/parser/sql.y b/pkg/sql/parser/sql.y index eaca4853068f..7e1ad65960d9 100644 --- a/pkg/sql/parser/sql.y +++ b/pkg/sql/parser/sql.y @@ -4600,6 +4600,10 @@ reset_session_stmt: { $$.val = &tree.SetVar{Name: $3, Values:tree.Exprs{tree.DefaultVal{}}, Reset: true} } +| RESET_ALL ALL + { + $$.val = &tree.SetVar{ResetAll: true, Reset: true} + } | RESET error // SHOW HELP: RESET // %Help: RESET CLUSTER SETTING - reset a cluster setting to its default value diff --git a/pkg/sql/pgwire/conn_test.go b/pkg/sql/pgwire/conn_test.go index 1178059bdeb4..9b30a1a28635 100644 --- a/pkg/sql/pgwire/conn_test.go +++ b/pkg/sql/pgwire/conn_test.go @@ -1754,6 +1754,19 @@ func TestRoleDefaultSettings(t *testing.T) { setupStmt: "ALTER ROLE testuser IN DATABASE defaultdb SET search_path = 'f'", expectedSearchPath: "f", }, + { + // RESET after connecting should go back to the per-role default setting. + setupStmt: "", + postConnectStmt: "SET search_path = 'new'; RESET search_path;", + expectedSearchPath: "f", + }, + { + // RESET should use the query param as the default if it was provided. + setupStmt: "", + searchPathOptOverride: "g", + postConnectStmt: "SET search_path = 'new'; RESET ALL;", + expectedSearchPath: "g", + }, { setupStmt: "ALTER ROLE testuser IN DATABASE defaultdb SET search_path = DEFAULT", expectedSearchPath: "c", diff --git a/pkg/sql/set_var.go b/pkg/sql/set_var.go index c355849a02a0..0249ed00e25a 100644 --- a/pkg/sql/set_var.go +++ b/pkg/sql/set_var.go @@ -39,10 +39,16 @@ type setVarNode struct { typedValues []tree.TypedExpr } +// resetAllNode represents a RESET ALL statement. +type resetAllNode struct{} + // SetVar sets session variables. // Privileges: None. // Notes: postgres/mysql do not require privileges for session variables (some exceptions). func (p *planner) SetVar(ctx context.Context, n *tree.SetVar) (planNode, error) { + if n.ResetAll { + return &resetAllNode{}, nil + } if n.Name == "" { // A client has sent the reserved internal syntax SET ROW ..., // or the user entered `SET "" = foo`. Reject it. 
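Reviewer note: tying the sql.y, conn_test.go, and set_var.go changes together, RESET ALL now plans a dedicated resetAllNode (implemented in the hunk that follows) which restores every settable session variable, including custom options, to its session default while deliberately skipping `role`. Below is a hedged end-to-end sketch from a client's point of view; the connection string, user, and port are illustrative assumptions, and any pgwire driver would work equally well.

```go
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq" // CockroachDB speaks the Postgres wire protocol.
)

func main() {
	ctx := context.Background()

	// Illustrative connection string; adjust user, host, port, and TLS settings.
	db, err := sql.Open("postgres",
		"postgresql://testuser@localhost:26257/defaultdb?sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Session variables are per-connection, so pin one connection from the pool.
	conn, err := db.Conn(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Override a built-in variable and a custom option, then reset the session.
	for _, stmt := range []string{
		`SET search_path = 'new'`,
		`SET custom.option = 'x'`,
		`RESET ALL`,
	} {
		if _, err := conn.ExecContext(ctx, stmt); err != nil {
			log.Fatal(err)
		}
	}

	// search_path should be back to its session default: a per-role default set
	// via ALTER ROLE ... SET, a connection parameter, or the built-in default.
	var sp string
	if err := conn.QueryRowContext(ctx, `SHOW search_path`).Scan(&sp); err != nil {
		log.Fatal(err)
	}
	fmt.Println("search_path after RESET ALL:", sp)
}
```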
@@ -182,6 +188,45 @@ func (n *setVarNode) Next(_ runParams) (bool, error) { return false, nil } func (n *setVarNode) Values() tree.Datums { return nil } func (n *setVarNode) Close(_ context.Context) {} +func (n *resetAllNode) startExec(params runParams) error { + for varName, v := range varGen { + if v.Set == nil && v.RuntimeSet == nil && v.SetWithPlanner == nil { + continue + } + // For Postgres compatibility, don't reset `role` here. + if varName == "role" { + continue + } + _, defVal := getSessionVarDefaultString( + varName, + v, + params.p.sessionDataMutatorIterator.sessionDataMutatorBase, + ) + if err := params.p.SetSessionVar(params.ctx, varName, defVal, false /* isLocal */); err != nil { + return err + } + } + for varName := range params.SessionData().CustomOptions { + _, v, err := getSessionVar(varName, false /* missingOK */) + if err != nil { + return err + } + _, defVal := getSessionVarDefaultString( + varName, + v, + params.p.sessionDataMutatorIterator.sessionDataMutatorBase, + ) + if err := params.p.SetSessionVar(params.ctx, varName, defVal, false /* isLocal */); err != nil { + return err + } + } + return nil +} + +func (n *resetAllNode) Next(_ runParams) (bool, error) { return false, nil } +func (n *resetAllNode) Values() tree.Datums { return nil } +func (n *resetAllNode) Close(_ context.Context) {} + func getStringVal(evalCtx *tree.EvalContext, name string, values []tree.TypedExpr) (string, error) { if len(values) != 1 { return "", newSingleArgVarError(name) diff --git a/pkg/sql/walk.go b/pkg/sql/walk.go index 76d54cdd4721..041b7aa1e4d8 100644 --- a/pkg/sql/walk.go +++ b/pkg/sql/walk.go @@ -218,6 +218,7 @@ func (v *planVisitor) visitInternal(plan planNode, name string) { case *createViewNode: case *setVarNode: case *setClusterSettingNode: + case *resetAllNode: case *delayedNode: if n.plan != nil { @@ -416,6 +417,7 @@ var planNodeNames = map[reflect.Type]string{ reflect.TypeOf(&renameTableNode{}): "rename table", reflect.TypeOf(&reparentDatabaseNode{}): "reparent database", reflect.TypeOf(&renderNode{}): "render", + reflect.TypeOf(&resetAllNode{}): "reset all", reflect.TypeOf(&RevokeRoleNode{}): "revoke role", reflect.TypeOf(&rowCountNode{}): "count", reflect.TypeOf(&rowSourceToPlanNode{}): "row source to plan node", diff --git a/pkg/util/timeutil/time_zone_util.go b/pkg/util/timeutil/time_zone_util.go index 63e1072435dc..e4a9a580ed38 100644 --- a/pkg/util/timeutil/time_zone_util.go +++ b/pkg/util/timeutil/time_zone_util.go @@ -23,7 +23,8 @@ import ( const ( offsetBoundSecs = 167*60*60 + 59*60 // PG supports UTC hour offsets in the range [-167, 167]. - maxUTCHourOffset = 167 + maxUTCHourOffset = 167 + maxUTCHourOffsetInSeconds = maxUTCHourOffset * 60 * 60 ) var timezoneOffsetRegex = regexp.MustCompile(`(?i)^(GMT|UTC)?([+-])?(\d{1,3}(:[0-5]?\d){0,2})$`) @@ -69,6 +70,9 @@ func TimeZoneStringToLocation( ) (*time.Location, error) { offset, _, parsed := ParseTimeZoneOffset(locStr, std) if parsed { + if offset < -maxUTCHourOffsetInSeconds || offset > maxUTCHourOffsetInSeconds { + return nil, errors.New("UTC timezone offset is out of range.") + } return TimeZoneOffsetToLocation(offset), nil } diff --git a/vendor b/vendor index 6c582acbc4a7..ed53f102e86b 160000 --- a/vendor +++ b/vendor @@ -1 +1 @@ -Subproject commit 6c582acbc4a78cdeaf1b117e7df8527e881b90c7 +Subproject commit ed53f102e86b51c228b61d3ad54f1428982fe75e
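Reviewer note: the time_zone_util.go hunk above adds a bounds check so that a numerically parsed offset beyond PG's +/-167-hour limit is rejected instead of being turned into a fixed-offset location. The standalone sketch below reproduces only the arithmetic and the guard; it is not the package's actual API, and the error text simply mirrors the message used in the diff.

```go
package main

import (
	"errors"
	"fmt"
)

// Mirrors the constants added in pkg/util/timeutil: PG accepts UTC hour
// offsets in [-167, 167], i.e. +/-601200 seconds.
const (
	maxUTCHourOffset          = 167
	maxUTCHourOffsetInSeconds = maxUTCHourOffset * 60 * 60
)

// checkOffset is a standalone sketch of the new guard in
// TimeZoneStringToLocation: a parsed numeric offset outside the PG range is
// rejected rather than accepted.
func checkOffset(offsetSecs int64) error {
	if offsetSecs < -maxUTCHourOffsetInSeconds || offsetSecs > maxUTCHourOffsetInSeconds {
		return errors.New("UTC timezone offset is out of range.")
	}
	return nil
}

func main() {
	fmt.Println(checkOffset(167 * 60 * 60))  // <nil>: exactly at the bound
	fmt.Println(checkOffset(168 * 60 * 60))  // error: out of range
	fmt.Println(checkOffset(-200 * 60 * 60)) // error: out of range
}
```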